mirror of
https://github.com/1Password/onepassword-operator.git
synced 2025-10-21 23:18:06 +00:00
Compare commits
67 Commits
goreleaser
...
inject-web
Author | SHA1 | Date | |
---|---|---|---|
![]() |
a8e6a4a4f1 | ||
![]() |
a5f4a7a0c1 | ||
![]() |
b35c668959 | ||
![]() |
2fa035022c | ||
![]() |
d715a6ed0e | ||
![]() |
f439b04415 | ||
![]() |
ee12dd449a | ||
![]() |
e6ca8d49ba | ||
![]() |
f974d3f398 | ||
![]() |
d807e92c36 | ||
![]() |
244771717c | ||
![]() |
7aeb36e383 | ||
![]() |
5c2f840623 | ||
![]() |
670040477e | ||
![]() |
a45a310611 | ||
![]() |
d80e8dd799 | ||
![]() |
88728909ff | ||
![]() |
e365ebfdfa | ||
![]() |
2c4b4df01a | ||
![]() |
49d984c6f2 | ||
![]() |
72cad7284c | ||
![]() |
0193a98681 | ||
![]() |
f241d7423d | ||
![]() |
6043e0da0b | ||
![]() |
753cc5e9a3 | ||
![]() |
8cfe98073e | ||
![]() |
c0037526b0 | ||
![]() |
96b42e7c52 | ||
![]() |
579b5848da | ||
![]() |
dff934cbc3 | ||
![]() |
2096f4440f | ||
![]() |
b3fc707337 | ||
![]() |
b50d864b50 | ||
![]() |
1643385d9b | ||
![]() |
9441214733 | ||
![]() |
7e4e988813 | ||
![]() |
fb1262f1bd | ||
![]() |
68f084080e | ||
![]() |
a428fe7462 | ||
![]() |
ea2d1f8a09 | ||
![]() |
bd96d50a9b | ||
![]() |
859c9e3462 | ||
![]() |
9dabac4a55 | ||
![]() |
d927a08790 | ||
![]() |
933f7c4e2c | ||
![]() |
81eb9a521f | ||
![]() |
eb32bd7f94 | ||
![]() |
a5781af949 | ||
![]() |
0aa5781acd | ||
![]() |
700be4426f | ||
![]() |
76ef9aa372 | ||
![]() |
d7e6704314 | ||
![]() |
2443979602 | ||
![]() |
5b65196d31 | ||
![]() |
e7df8a485d | ||
![]() |
ded76138da | ||
![]() |
a5db6aeb81 | ||
![]() |
d45f682c37 | ||
![]() |
d0c1235e58 | ||
![]() |
9e8f621020 | ||
![]() |
8dd7a28456 | ||
![]() |
43b06dd7aa | ||
![]() |
e8e01d6578 | ||
![]() |
b53e017b77 | ||
![]() |
b2565cebf8 | ||
![]() |
9459d2e292 | ||
![]() |
0409b17ef4 |
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Report bugs and errors found while using the Operator.
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Your environment
|
||||
|
||||
<!-- Version of the Operator when the error occurred -->
|
||||
Operator Version:
|
||||
|
||||
<!-- What version of the Connect server are you running?
|
||||
You can get this information from the Integrations section in 1Password
|
||||
https://start.1password.com/integrations/active
|
||||
-->
|
||||
Connect Server Version:
|
||||
|
||||
<!-- What version of Kubernetes have you deployed the operator to? -->
|
||||
Kubernetes Version:
|
||||
|
||||
## What happened?
|
||||
<!-- Describe the bug or error -->
|
||||
|
||||
## What did you expect to happen?
|
||||
<!-- Describe what should have happened -->
|
||||
|
||||
## Steps to reproduce
|
||||
1. <!-- Describe Steps to reproduce the issue -->
|
||||
|
||||
|
||||
## Notes & Logs
|
||||
<!-- Paste any logs here that may help with debugging.
|
||||
Remember to remove any sensitive information before sharing! -->
|
9
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
9
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
# docs: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: 1Password Community
|
||||
url: https://1password.community/categories/secrets-automation
|
||||
about: Please ask general Secrets Automation questions here.
|
||||
- name: 1Password Security Bug Bounty
|
||||
url: https://bugcrowd.com/agilebits
|
||||
about: Please report security vulnerabilities here.
|
32
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
32
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for the Operator
|
||||
title: ''
|
||||
labels: feature-request
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Summary
|
||||
<!-- Briefly describe the feature in one or two sentences. You can include more details later. -->
|
||||
|
||||
### Use cases
|
||||
<!-- Describe the use cases that make this feature useful to others.
|
||||
The description should help the reader understand why the feature is necessary.
|
||||
The better we understand your use case, the better we can help create an appropriate solution. -->
|
||||
|
||||
### Proposed solution
|
||||
<!-- If you already have an idea for how the feature should work, use this space to describe it.
|
||||
We'll work with you to find a workable approach, and any implementation details are appreciated.
|
||||
-->
|
||||
|
||||
### Is there a workaround to accomplish this today?
|
||||
<!-- If there's a way to accomplish this feature request without changes to the codebase, we'd like to hear it.
|
||||
-->
|
||||
|
||||
### References & Prior Work
|
||||
<!-- If a similar feature was implemented in another project or tool, add a link so we can better understand your request.
|
||||
Links to relevant documentation or RFCs are also appreciated. -->
|
||||
|
||||
* <!-- Reference 1 -->
|
||||
* <!-- Reference 2, etc -->
|
52
.github/workflows/release.yml
vendored
52
.github/workflows/release.yml
vendored
@@ -1,13 +1,15 @@
|
||||
name: goreleaser
|
||||
name: release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
- 'v*'
|
||||
|
||||
jobs:
|
||||
goreleaser:
|
||||
release-docker:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
@@ -15,15 +17,41 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
-
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
name: Docker meta
|
||||
id: meta
|
||||
uses: crazy-max/ghaction-docker-meta@v2
|
||||
with:
|
||||
go-version: 1.15
|
||||
images: |
|
||||
1password/onepassword-operator
|
||||
# Publish image for x.y.z and x.y
|
||||
# The latest tag is automatically added for semver tags
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
- name: Get the version from tag
|
||||
id: get_version
|
||||
run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v}
|
||||
-
|
||||
name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Docker Login
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
version: latest
|
||||
args: release --rm-dist
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
operator_version=${{ steps.get_version.outputs.VERSION }}
|
||||
|
25
CHANGELOG.md
25
CHANGELOG.md
@@ -12,6 +12,31 @@
|
||||
|
||||
---
|
||||
|
||||
[//]: # (START/v1.1.0)
|
||||
# v1.1.0
|
||||
|
||||
## Fixes
|
||||
* Fix normalization for keys in a Secret's `data` section to allow upper- and lower-case alphanumeric characters. {#66}
|
||||
|
||||
---
|
||||
|
||||
[//]: # (START/v1.0.2)
|
||||
# v1.0.2
|
||||
|
||||
## Fixes
|
||||
* Name normalizer added to handle non-conforming item names.
|
||||
|
||||
---
|
||||
|
||||
[//]: # (START/v1.0.1)
|
||||
# v1.0.1
|
||||
|
||||
## Features
|
||||
* This release also contains an arm64 Docker image. {#20}
|
||||
* Docker images are also pushed to the :latest and :<major>.<minor> tags.
|
||||
|
||||
---
|
||||
|
||||
[//]: # (START/v1.0.0)
|
||||
# v1.0.0
|
||||
|
||||
|
31
Makefile
31
Makefile
@@ -11,7 +11,9 @@ versionFile = $(CURDIR)/.VERSION
|
||||
curVersion := $(shell cat $(versionFile) | sed 's/^v//')
|
||||
|
||||
OPERATOR_NAME := onepassword-connect-operator
|
||||
DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
|
||||
OPERATOR_NAME := onepassword-secrets-injector
|
||||
OPERATOR_DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
|
||||
INJECTOR_DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
|
||||
|
||||
test: ## Run test suite
|
||||
go test ./...
|
||||
@@ -19,17 +21,30 @@ test: ## Run test suite
|
||||
test/coverage: ## Run test suite with coverage report
|
||||
go test -v ./... -cover
|
||||
|
||||
build: ## Build operator Docker image
|
||||
@docker build -f Dockerfile --build-arg operator_version=$(curVersion) -t $(DOCKER_IMG_TAG)
|
||||
build/operator: ## Build operator Docker image
|
||||
@docker build -f operator/Dockerfile --build-arg operator_version=$(curVersion) -t $(OPERATOR_DOCKER_IMG_TAG) .
|
||||
@echo "Successfully built and tagged image."
|
||||
@echo "Tag: $(DOCKER_IMG_TAG)"
|
||||
@echo "Tag: $(OPERATOR_DOCKER_IMG_TAG)"
|
||||
|
||||
build/local: ## Build local version of the operator Docker image
|
||||
@docker build -f Dockerfile -t local/$(DOCKER_IMG_TAG)
|
||||
build/operator/local: ## Build local version of the operator Docker image
|
||||
@docker build -f operator/Dockerfile -t local/$(OPERATOR_DOCKER_IMG_TAG) .
|
||||
|
||||
build/binary: clean ## Build operator binary
|
||||
build/operator/binary: clean ## Build operator binary
|
||||
@mkdir -p dist
|
||||
@go build -mod vendor -a -o manager ./cmd/manager/main.go
|
||||
@go build -mod vendor -a -o manager ./operator/cmd/manager/main.go
|
||||
@mv manager ./dist
|
||||
|
||||
build/secret-injector: ## Build secret-injector Docker image
|
||||
@docker build -f secret-injector/Dockerfile --build-arg operator_version=$(curVersion) -t $(INJECTOR_DOCKER_IMG_TAG) .
|
||||
@echo "Successfully built and tagged image."
|
||||
@echo "Tag: $(INJECTOR_DOCKER_IMG_TAG)"
|
||||
|
||||
build/secret-injector/local: ## Build local version of the secret-injector Docker image
|
||||
@docker build -f secret-injector/Dockerfile -t local/$(INJECTOR_DOCKER_IMG_TAG) .
|
||||
|
||||
build/secret-injector/binary: clean ## Build secret-injector binary
|
||||
@mkdir -p dist
|
||||
@go build -mod vendor -a -o manager ./secret-injector/cmd/manager/main.go
|
||||
@mv manager ./dist
|
||||
|
||||
clean:
|
||||
|
221
README.md
221
README.md
@@ -1,4 +1,8 @@
|
||||
# 1Password Connect Kubernetes Operator
|
||||
# 1Password for Kubernetes
|
||||
|
||||
This repository includes various tooling for integrating 1Password secrets wtih Kubernetes.
|
||||
|
||||
## 1Password Connect Kubernetes Operator
|
||||
|
||||
The 1Password Connect Kubernetes Operator provides the ability to integrate Kubernetes with 1Password. This Operator manages `OnePasswordItem` Custom Resource Definitions (CRDs) that define the location of an Item stored in 1Password. The `OnePasswordItem` CRD, when created, will be used to compose a Kubernetes Secret containing the contents of the specified item.
|
||||
|
||||
@@ -6,222 +10,17 @@ The 1Password Connect Kubernetes Operator also allows for Kubernetes Secrets to
|
||||
|
||||
The 1Password Connect Kubernetes Operator will continually check for updates from 1Password for any Kubernetes Secret that it has generated. If a Kubernetes Secret is updated, any Deployment using that secret can be automatically restarted.
|
||||
|
||||
## Setup
|
||||
[Click here for more details on the 1Password Kubernetes Operator](operator/README.md)
|
||||
|
||||
Prerequisites:
|
||||
## 1Password Secret Injector
|
||||
|
||||
- [1Password Command Line Tool Installed](https://1password.com/downloads/command-line/)
|
||||
- [kubectl installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||
- [docker installed](https://docs.docker.com/get-docker/)
|
||||
- [Generated a 1password-credentials.json file and issued a 1Password Connect API Token for the K8s Operator integration](https://support.b5dev.com/cs/connect)
|
||||
- [1Password Connect deployed to Kubernetes](https://support.b5dev.com/cs/connect-deploy-kubernetes/#step-2-deploy-a-connect-server). **NOTE**: If customization of the 1Password Connect deployment is not required you can skip this prerequisite.
|
||||
|
||||
### Quickstart for Deploying 1Password Connect to Kubernetes
|
||||
[Click here for more details on the 1Password Secret Injector](secret-injector/README.md)
|
||||
|
||||
|
||||
#### Deploy with Helm
|
||||
The 1Password Connect Helm Chart helps to simplify the deployment of 1Password Connect and the 1Password Connect Kubernetes Operator to Kubernetes.
|
||||
|
||||
[The 1Password Connect Helm Chart can be found here.](https://github.com/1Password/connect-helm-charts)
|
||||
|
||||
#### Deploy using the Connect Operator
|
||||
If 1Password Connect is already running, you can skip this step. This guide will provide a quickstart option for deploying a default configuration of 1Password Connect via starting the deploying the 1Password Connect Operator, however it is recommended that you instead deploy your own manifest file if customization of the 1Password Connect deployment is desired.
|
||||
|
||||
Encode the 1password-credentials.json file you generated in the prerequisite steps and save it to a file named op-session:
|
||||
|
||||
```bash
|
||||
$ cat 1password-credentials.json | base64 | \
|
||||
tr '/+' '_-' | tr -d '=' | tr -d '\n' > op-session
|
||||
```
|
||||
|
||||
Create a Kubernetes secret from the op-session file:
|
||||
```bash
|
||||
|
||||
$ kubectl create secret generic op-credentials --from-file=1password-credentials.json
|
||||
```
|
||||
|
||||
Add the following environment variable to the onepassword-connect-operator container in `deploy/operator.yaml`:
|
||||
```yaml
|
||||
- name: MANAGE_CONNECT
|
||||
value: "true"
|
||||
```
|
||||
Adding this environment variable will have the operator automatically deploy a default configuration of 1Password Connect to the `default` namespace.
|
||||
### Kubernetes Operator Deployment
|
||||
|
||||
**Create Kubernetes Secret for OP_CONNECT_TOKEN**
|
||||
|
||||
"Create a Connect token for the operator and save it as a Kubernetes Secret:
|
||||
|
||||
```bash
|
||||
$ kubectl create secret generic op-operator-connect-token --from-literal=token=<OP_CONNECT_TOKEN>"
|
||||
```
|
||||
|
||||
If you do not have a token for the operator, you can generate a token and save it to kubernetes with the following command:
|
||||
```bash
|
||||
$ kubectl create secret generic op-operator-connect-token --from-literal=token=$(op create connect token <server> op-k8s-operator --vault <vault>)
|
||||
```
|
||||
|
||||
[More information on generating a token can be found here](https://support.1password.com/cs/secrets-automation/#appendix-issue-additional-access-tokens)
|
||||
|
||||
**Set Permissions For Operator**
|
||||
|
||||
We must create a service account, role, and role binding and Kubernetes. Examples can be found in the `/deploy` folder.
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f deploy/permissions.yaml
|
||||
```
|
||||
|
||||
**Create Custom One Password Secret Resource**
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f deploy/crds/onepassword.com_onepassworditems_crd.yaml
|
||||
```
|
||||
|
||||
**Deploying the Operator**
|
||||
|
||||
An sample Deployment yaml can be found at `/deploy/operator.yaml`.
|
||||
|
||||
|
||||
To further configure the 1Password Kubernetes Operator the Following Environment variables can be set in the operator yaml:
|
||||
|
||||
- **WATCH_NAMESPACE:** comma separated list of what Namespaces to watch for changes.
|
||||
- **OP_CONNECT_HOST** (required): Specifies the host name within Kubernetes in which to access the 1Password Connect.
|
||||
- **POLLING_INTERVAL** (default: 600)**:** The number of seconds the 1Password Kubernetes Operator will wait before checking for updates from 1Password Connect.
|
||||
- **MANAGE_CONNECT** (default: false): If set to true, on deployment of the operator, a default configuration of the OnePassword Connect Service will be deployed to the `default` namespace.
|
||||
- **AUTO_RESTART** (default: false): If set to true, the operator will restart any deployment using a secret from 1Password Connect. This can be overwritten by namespace, deployment, or individual secret. More details on AUTO_RESTART can be found in the ["Configuring Automatic Rolling Restarts of Deployments"](#configuring-automatic-rolling-restarts-of-deployments) section.
|
||||
|
||||
Apply the deployment file:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f deploy/operator.yaml
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
To create a Kubernetes Secret from a 1Password item, create a yaml file with the following
|
||||
|
||||
```yaml
|
||||
apiVersion: onepassword.com/v1
|
||||
kind: OnePasswordItem # {insert_new_name}
|
||||
metadata:
|
||||
name: <item_name> #this name will also be used for naming the generated kubernetes secret
|
||||
spec:
|
||||
itemPath: "vaults/<vault_id_or_title>/items/<item_id_or_title>"
|
||||
```
|
||||
|
||||
Deploy the OnePasswordItem to Kubernetes:
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f <your_item>.yaml
|
||||
```
|
||||
|
||||
To test that the Kubernetes Secret check that the following command returns a secret:
|
||||
|
||||
```bash
|
||||
$ kubectl get secret <secret_name>
|
||||
```
|
||||
|
||||
Note: Deleting the `OnePasswordItem` that you've created will automatically delete the created Kubernetes Secret.
|
||||
|
||||
To create a single Kubernetes Secret for a deployment, add the following annotations to the deployment metadata:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: deployment-example
|
||||
annotations:
|
||||
operator.1password.io/item-path: "vaults/{vault_id_or_title}/items/{item_id_or_title}"
|
||||
operator.1password.io/item-name: "{secret_name}"
|
||||
```
|
||||
|
||||
Applying this yaml file will create a Kubernetes Secret with the name `<secret_name>` and contents from the location specified at the specified Item Path.
|
||||
|
||||
Note: Deleting the Deployment that you've created will automatically delete the created Kubernetes Secret only if the deployment is still annotated with `operator.1password.io/item-path` and `operator.1password.io/item-name` and no other deployment is using the secret.
|
||||
|
||||
If a 1Password Item that is linked to a Kubernetes Secret is updated within the POLLING_INTERVAL the associated Kubernetes Secret will be updated. However, if you do not want a specific secret to be updated you can add the tag `operator.1password.io:ignore-secret` to the item stored in 1Password. While this tag is in place, any updates made to an item will not trigger an update to the associated secret in Kubernetes.
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
If multiple 1Password vaults/items have the same `title` when using a title in the access path, the desired action will be performed on the oldest vault/item. Furthermore, titles that include white space characters cannot be used.
|
||||
|
||||
---
|
||||
|
||||
### Configuring Automatic Rolling Restarts of Deployments
|
||||
|
||||
If a 1Password Item that is linked to a Kubernetes Secret is updated, any deployments configured to `auto-restart` AND are using that secret will be given a rolling restart the next time 1Password Connect is polled for updates.
|
||||
|
||||
There are many levels of granularity on which to configure auto restarts on deployments: at the operator level, per-namespace, or per-deployment.
|
||||
|
||||
**On the operator**: This method allows for managing auto restarts on all deployments within the namespaces watched by operator. Auto restarts can be enabled by setting the environemnt variable `AUTO_RESTART` to true. If the value is not set, the operator will default this value to false.
|
||||
|
||||
**Per Namespace**: This method allows for managing auto restarts on all deployments within a namespace. Auto restarts can by managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired namespace. An example of this is shown below:
|
||||
```yaml
|
||||
# enabled auto restarts for all deployments within a namespace unless overwritten within a deployment
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: "example-namespace"
|
||||
operator.1password.io/auto-restart: "true"
|
||||
```
|
||||
If the value is not set, the auto reset settings on the operator will be used. This value can be overwritten by deployment.
|
||||
|
||||
**Per Deployment**
|
||||
This method allows for managing auto restarts on a given deployment. Auto restarts can by managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired deployment. An example of this is shown below:
|
||||
```yaml
|
||||
# enabled auto restarts for the deployment
|
||||
apiVersion: v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: "example-deployment"
|
||||
operator.1password.io/auto-restart: "true"
|
||||
```
|
||||
If the value is not set, the auto reset settings on the namespace will be used.
|
||||
|
||||
**Per OnePasswordItem Custom Resource**
|
||||
This method allows for managing auto restarts on a given OnePasswordItem custom resource. Auto restarts can by managed by setting the annotation `operator.1password.io/auto_restart` to either `true` or `false` on the desired OnePasswordItem. An example of this is shown below:
|
||||
```yaml
|
||||
# enabled auto restarts for the OnePasswordItem
|
||||
apiVersion: onepassword.com/v1
|
||||
kind: OnePasswordItem
|
||||
metadata:
|
||||
name: example
|
||||
operator.1password.io/auto-restart: "true"
|
||||
```
|
||||
If the value is not set, the auto reset settings on the deployment will be used.
|
||||
|
||||
## Development
|
||||
|
||||
### Creating a Docker image
|
||||
|
||||
To create a local version of the Docker image for testing, use the following `Makefile` target:
|
||||
```shell
|
||||
make build/local
|
||||
```
|
||||
|
||||
### Building the Operator binary
|
||||
```shell
|
||||
make build/binary
|
||||
```
|
||||
|
||||
The binary will be placed inside a `dist` folder within this repository.
|
||||
|
||||
### Running Tests
|
||||
|
||||
```shell
|
||||
make test
|
||||
```
|
||||
|
||||
With coverage:
|
||||
```shell
|
||||
make test/coverage
|
||||
```
|
||||
|
||||
## Security
|
||||
# Security
|
||||
|
||||
1Password requests you practice responsible disclosure if you discover a vulnerability.
|
||||
|
||||
Please file requests via [**BugCrowd**](https://bugcrowd.com/agilebits).
|
||||
|
||||
For information about security practices, please visit our [Security homepage](https://bugcrowd.com/agilebits).
|
||||
For information about security practices, please visit our [Security homepage](https://bugcrowd.com/agilebits).
|
||||
|
@@ -1,15 +0,0 @@
|
||||
FROM registry.access.redhat.com/ubi8/ubi-minimal:latest
|
||||
|
||||
ENV OPERATOR=/usr/local/bin/onepassword-connect-operator \
|
||||
USER_UID=1001 \
|
||||
USER_NAME=onepassword-connect-operator
|
||||
|
||||
# install operator binary
|
||||
COPY build/_output/bin/op-kubernetes-connect-operator ${OPERATOR}
|
||||
|
||||
COPY build/bin /usr/local/bin
|
||||
RUN /usr/local/bin/user_setup
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/entrypoint"]
|
||||
|
||||
USER ${USER_UID}
|
@@ -1,3 +0,0 @@
|
||||
#!/bin/sh -e
|
||||
|
||||
exec ${OPERATOR} $@
|
@@ -1,11 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -x
|
||||
|
||||
# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
|
||||
echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd
|
||||
mkdir -p "${HOME}"
|
||||
chown "${USER_UID}:0" "${HOME}"
|
||||
chmod ug+rwx "${HOME}"
|
||||
|
||||
# no need for this script to remain in the image after running
|
||||
rm "$0"
|
4
go.mod
4
go.mod
@@ -4,14 +4,18 @@ go 1.13
|
||||
|
||||
require (
|
||||
github.com/1Password/connect-sdk-go v1.0.1
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
|
||||
github.com/operator-framework/operator-sdk v0.19.0
|
||||
github.com/prometheus/common v0.14.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/suborbital/grav v0.4.1
|
||||
github.com/suborbital/vektor v0.5.0
|
||||
k8s.io/api v0.18.2
|
||||
k8s.io/apimachinery v0.18.2
|
||||
k8s.io/client-go v12.0.0+incompatible
|
||||
k8s.io/kubectl v0.18.2
|
||||
k8s.io/kubernetes v1.13.0
|
||||
sigs.k8s.io/controller-runtime v0.6.0
|
||||
)
|
||||
|
||||
|
70
go.sum
70
go.sum
@@ -68,6 +68,7 @@ github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHS
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
|
||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
@@ -158,9 +159,11 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqh
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
|
||||
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk=
|
||||
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
@@ -220,16 +223,19 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG
|
||||
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY=
|
||||
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
@@ -370,6 +376,7 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
|
||||
@@ -407,6 +414,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
@@ -429,6 +437,8 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
@@ -454,6 +464,8 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
@@ -554,14 +566,17 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -620,6 +635,7 @@ github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
|
||||
github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU=
|
||||
github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0=
|
||||
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
@@ -654,10 +670,16 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
|
||||
github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
|
||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
||||
github.com/nats-io/nats-server/v2 v2.3.2/go.mod h1:dUf7Cm5z5LbciFVwWx54owyCKm8x4/hL6p7rrljhLFY=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
|
||||
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
@@ -692,11 +714,14 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
@@ -823,9 +848,14 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
|
||||
github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
|
||||
github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/schollz/peerdiscovery v1.6.1 h1:2V/Dw+GZY18W6e3yAeUzJouHmIcr9UlWtqsQtpfObGw=
|
||||
github.com/schollz/peerdiscovery v1.6.1/go.mod h1:bq5/NB9o9/jyEwiW4ubehfToBa2LwdQQMoNiy/vSdYg=
|
||||
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sethvargo/go-envconfig v0.3.0/go.mod h1:XZ2JRR7vhlBEO5zMmOpLgUhgYltqYqq4d4tKagtPUv0=
|
||||
github.com/sethvargo/go-envconfig v0.3.2 h1:277Lb2iTpUZjUZu1qLoLa/aetwvtZbKh8wNWXmc6dSk=
|
||||
github.com/sethvargo/go-envconfig v0.3.2/go.mod h1:XZ2JRR7vhlBEO5zMmOpLgUhgYltqYqq4d4tKagtPUv0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
@@ -838,6 +868,7 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
|
||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
@@ -885,6 +916,11 @@ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/suborbital/grav v0.4.1 h1:g70gG4EVqNcy5LMII05ayLtxMD8v5M9kBW1BcJFYsC0=
|
||||
github.com/suborbital/grav v0.4.1/go.mod h1:jN+zB9O6ztW2GqruEU46EMOFjvc8K2UDLyofFJWdI8o=
|
||||
github.com/suborbital/vektor v0.2.2/go.mod h1:6YQE7r6t1JcVs3twpqjXDftsLUaTNUk5YorRKHcDamI=
|
||||
github.com/suborbital/vektor v0.5.0 h1:E5PPiBYboarFoprUmjjG/ieVCeIUpD/1F2MnVa/iaDs=
|
||||
github.com/suborbital/vektor v0.5.0/go.mod h1:iNMR6/alEK1D7fbupwIlGlK5LajngEvq/N+RGVWaqNw=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
@@ -986,10 +1022,16 @@ golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
|
||||
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1057,6 +1099,11 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3ob
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1080,6 +1127,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1126,6 +1174,14 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1133,12 +1189,19 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1229,6 +1292,7 @@ google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBr
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 h1:wDju+RU97qa0FZT0QnZDg9Uc2dH0Ql513kFvHocz+WM=
|
||||
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
@@ -1297,6 +1361,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0=
|
||||
@@ -1316,6 +1381,7 @@ k8s.io/api v0.0.0-20191122220107-b5267f2975e0/go.mod h1:vYpRfxYkMrmPPSesoHEkGNHx
|
||||
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
|
||||
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
|
||||
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
|
||||
k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
|
||||
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
|
||||
k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
|
||||
@@ -1331,7 +1397,9 @@ k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftc
|
||||
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
|
||||
k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ=
|
||||
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
|
||||
k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic=
|
||||
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
|
||||
k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4=
|
||||
k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA=
|
||||
k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
|
||||
k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ=
|
||||
@@ -1343,6 +1411,7 @@ k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRV
|
||||
k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
|
||||
k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc=
|
||||
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
|
||||
k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
|
||||
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
@@ -1367,6 +1436,7 @@ k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+
|
||||
k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
|
||||
k8s.io/kubectl v0.18.2 h1:9jnGSOC2DDVZmMUTMi0D1aed438mfQcgqa5TAzVjA1k=
|
||||
k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4=
|
||||
k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
|
||||
k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg=
|
||||
|
@@ -7,20 +7,18 @@ COPY go.mod go.mod
COPY go.sum go.sum

# Copy the go source
COPY cmd/manager/main.go main.go
COPY pkg/ pkg/
COPY version/ version/
COPY operator/cmd/manager/main.go operator/main.go
COPY operator/pkg/ operator/pkg/
COPY operator/version/ operator/version/
COPY vendor/ vendor/

# Build
ARG operator_version=dev
RUN CGO_ENABLED=0 \
    GOOS=linux \
    GOARCH=amd64 \
    GO111MODULE=on \
    go build \
    -ldflags "-X version.Version=$operator_version" \
    -ldflags "-X \"github.com/1Password/onepassword-operator/operator/version.Version=$operator_version\"" \
    -mod vendor \
    -a -o manager main.go
    -a -o manager operator/main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -28,6 +26,6 @@ FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
COPY deploy/connect/ deploy/connect/
COPY operator/deploy/connect/ operator/deploy/connect/

ENTRYPOINT ["/manager"]
235
operator/README.md
Normal file
@@ -0,0 +1,235 @@

# 1Password Connect Kubernetes Operator

The 1Password Connect Kubernetes Operator provides the ability to integrate Kubernetes with 1Password. This Operator manages `OnePasswordItem` Custom Resource Definitions (CRDs) that define the location of an Item stored in 1Password. The `OnePasswordItem` CRD, when created, will be used to compose a Kubernetes Secret containing the contents of the specified item.

The 1Password Connect Kubernetes Operator also allows Kubernetes Secrets to be composed from a 1Password Item through annotation of an Item Path on a deployment.

The 1Password Connect Kubernetes Operator will continually check 1Password for updates to any Kubernetes Secret that it has generated. If a Kubernetes Secret is updated, any Deployment using that secret can be automatically restarted.

## Setup

Prerequisites:

- [1Password Command Line Tool installed](https://1password.com/downloads/command-line/)
- [kubectl installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
- [docker installed](https://docs.docker.com/get-docker/)
- [A generated 1password-credentials.json file and a 1Password Connect API token issued for the K8s Operator integration](https://support.1password.com/secrets-automation/)
- [1Password Connect deployed to Kubernetes](https://support.1password.com/connect-deploy-kubernetes/#step-2-deploy-a-1password-connect-server). **NOTE**: If customization of the 1Password Connect deployment is not required, you can skip this prerequisite.

### Quickstart for Deploying 1Password Connect to Kubernetes

#### Deploy with Helm

The 1Password Connect Helm Chart helps to simplify the deployment of 1Password Connect and the 1Password Connect Kubernetes Operator to Kubernetes.

[The 1Password Connect Helm Chart can be found here.](https://github.com/1Password/connect-helm-charts)

#### Deploy using the Connect Operator

If 1Password Connect is already running, you can skip this step. This guide provides a quickstart option for deploying a default configuration of 1Password Connect by deploying the 1Password Connect Operator. If you want to customize the 1Password Connect deployment, it is recommended that you deploy your own manifest file instead.

Encode the 1password-credentials.json file you generated in the prerequisite steps and save it to a file named op-session:

```bash
$ cat 1password-credentials.json | base64 | \
  tr '/+' '_-' | tr -d '=' | tr -d '\n' > op-session
```

Create a Kubernetes secret from the op-session file:

```bash
$ kubectl create secret generic op-credentials --from-file=1password-credentials.json
```

Add the following environment variable to the onepassword-connect-operator container in `deploy/operator.yaml`:

```yaml
- name: MANAGE_CONNECT
  value: "true"
```

Adding this environment variable will have the operator automatically deploy a default configuration of 1Password Connect to the `default` namespace.
### Kubernetes Operator Deployment

**Create Kubernetes Secret for OP_CONNECT_TOKEN**

Create a Connect token for the operator and save it as a Kubernetes Secret:

```bash
$ kubectl create secret generic onepassword-token --from-literal=token=<OP_CONNECT_TOKEN>
```

If you do not have a token for the operator, you can generate a token and save it to Kubernetes with the following command:

```bash
$ kubectl create secret generic onepassword-token --from-literal=token=$(op create connect token <server> op-k8s-operator --vault <vault>)
```

[More information on generating a token can be found here.](https://support.1password.com/secrets-automation/#appendix-issue-additional-access-tokens)

**Set Permissions For Operator**

We must create a service account, role, and role binding in Kubernetes. Examples can be found in the `/deploy` folder.

```bash
$ kubectl apply -f deploy/permissions.yaml
```

**Create Custom One Password Secret Resource**

```bash
$ kubectl apply -f deploy/crds/onepassword.com_onepassworditems_crd.yaml
```

**Deploying the Operator**

A sample Deployment yaml can be found at `/deploy/operator.yaml`.

To further configure the 1Password Kubernetes Operator, the following environment variables can be set in the operator yaml (see the illustrative snippet after this list):

- **OP_CONNECT_HOST** (required): Specifies the host name within Kubernetes at which 1Password Connect can be reached.
- **WATCH_NAMESPACE** (default: watch all namespaces): Comma-separated list of namespaces to watch for changes.
- **POLLING_INTERVAL** (default: 600): The number of seconds the 1Password Kubernetes Operator will wait before checking for updates from 1Password Connect.
- **MANAGE_CONNECT** (default: false): If set to true, on deployment of the operator, a default configuration of the OnePassword Connect Service will be deployed to the `default` namespace.
- **AUTO_RESTART** (default: false): If set to true, the operator will restart any deployment using a secret from 1Password Connect. This can be overwritten by namespace, deployment, or individual secret. More details on AUTO_RESTART can be found in the ["Configuring Automatic Rolling Restarts of Deployments"](#configuring-automatic-rolling-restarts-of-deployments) section.
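As a rough sketch of how these variables fit into the operator container's `env` block (the values below are illustrative placeholders, not taken from `deploy/operator.yaml`; adjust them to your own deployment):

```yaml
# Illustrative env block for the onepassword-connect-operator container.
env:
  - name: OP_CONNECT_HOST
    value: "http://onepassword-connect:8080"   # assumed service host/port; match your Connect service
  - name: WATCH_NAMESPACE
    value: "default,my-app"                    # placeholder namespaces
  - name: POLLING_INTERVAL
    value: "600"
  - name: MANAGE_CONNECT
    value: "true"
  - name: AUTO_RESTART
    value: "false"
```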
Apply the deployment file:

```bash
kubectl apply -f deploy/operator.yaml
```
## Usage

To create a Kubernetes Secret from a 1Password item, create a yaml file with the following:

```yaml
apiVersion: onepassword.com/v1
kind: OnePasswordItem
metadata:
  name: <item_name> # this name will also be used for naming the generated kubernetes secret
spec:
  itemPath: "vaults/<vault_id_or_title>/items/<item_id_or_title>"
```

Deploy the OnePasswordItem to Kubernetes:

```bash
$ kubectl apply -f <your_item>.yaml
```

To verify that the Kubernetes Secret was created, check that the following command returns a secret:

```bash
$ kubectl get secret <secret_name>
```

Note: Deleting the `OnePasswordItem` that you've created will automatically delete the created Kubernetes Secret.

To create a single Kubernetes Secret for a deployment, add the following annotations to the deployment metadata:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment-example
  annotations:
    operator.1password.io/item-path: "vaults/<vault_id_or_title>/items/<item_id_or_title>"
    operator.1password.io/item-name: "<secret_name>"
```

Applying this yaml file will create a Kubernetes Secret with the name `<secret_name>` and contents from the item at the specified Item Path.

Note: Deleting the Deployment that you've created will automatically delete the created Kubernetes Secret only if the deployment is still annotated with `operator.1password.io/item-path` and `operator.1password.io/item-name` and no other deployment is using the secret.

If a 1Password Item that is linked to a Kubernetes Secret is updated within the POLLING_INTERVAL, the associated Kubernetes Secret will be updated. However, if you do not want a specific secret to be updated, you can add the tag `operator.1password.io:ignore-secret` to the item stored in 1Password. While this tag is in place, any updates made to the item will not trigger an update to the associated secret in Kubernetes.

---
**NOTE**

If multiple 1Password vaults/items have the same `title` when using a title in the access path, the desired action will be performed on the oldest vault/item.

Titles and field names that include white space or other characters that are not valid in a [DNS subdomain name](https://kubernetes.io/docs/concepts/configuration/secret/) will produce Kubernetes secret names and data keys in the following format (see the example after this list):
- Invalid characters before the first alphanumeric character and after the last alphanumeric character will be removed
- All whitespace between words will be replaced by `-`
- All the letters will be lower-cased.
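For example, based on the test fixtures in this repository, a `OnePasswordItem` named `!my sECReT it3m%` yields a Secret whose metadata looks like:

```yaml
# The OnePasswordItem named "!my sECReT it3m%" results in a Secret named:
metadata:
  name: my-secret-it3m
```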

---

### Configuring Automatic Rolling Restarts of Deployments

If a 1Password Item that is linked to a Kubernetes Secret is updated, any deployment that is configured to auto-restart AND is using that secret will be given a rolling restart the next time 1Password Connect is polled for updates.

There are several levels of granularity at which to configure auto restarts on deployments: at the operator level, per namespace, per deployment, or per OnePasswordItem custom resource.

**On the operator**: This method allows for managing auto restarts on all deployments within the namespaces watched by the operator. Auto restarts can be enabled by setting the environment variable `AUTO_RESTART` to true (see the snippet after this paragraph). If the value is not set, the operator will default this value to false.
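A minimal, illustrative sketch of enabling this on the operator container (the surrounding container spec is omitted; adapt it to your `deploy/operator.yaml`):

```yaml
# Illustrative: enable auto restarts globally for the operator.
- name: AUTO_RESTART
  value: "true"
```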

**Per Namespace**: This method allows for managing auto restarts on all deployments within a namespace. Auto restarts can be managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired namespace. An example of this is shown below:

```yaml
# enable auto restarts for all deployments within a namespace unless overwritten within a deployment
apiVersion: v1
kind: Namespace
metadata:
  name: "example-namespace"
  annotations:
    operator.1password.io/auto-restart: "true"
```

If the value is not set, the auto restart setting on the operator will be used. This value can be overwritten per deployment.

**Per Deployment**
This method allows for managing auto restarts on a given deployment. Auto restarts can be managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired deployment. An example of this is shown below:

```yaml
# enable auto restarts for the deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "example-deployment"
  annotations:
    operator.1password.io/auto-restart: "true"
```

If the value is not set, the auto restart setting on the namespace will be used.

**Per OnePasswordItem Custom Resource**
This method allows for managing auto restarts on a given OnePasswordItem custom resource. Auto restarts can be managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired OnePasswordItem. An example of this is shown below:

```yaml
# enable auto restarts for the OnePasswordItem
apiVersion: onepassword.com/v1
kind: OnePasswordItem
metadata:
  name: example
  annotations:
    operator.1password.io/auto-restart: "true"
```

If the value is not set, the auto restart setting on the deployment will be used.

## Development

### Creating a Docker image

To create a local version of the Docker image for testing, use the following `Makefile` target:

```shell
make build/local
```

### Building the Operator binary

```shell
make build/binary
```

The binary will be placed inside a `dist` folder within this repository.

### Running Tests

```shell
make test
```

With coverage:

```shell
make test/coverage
```

## Security

1Password requests you practice responsible disclosure if you discover a vulnerability.

Please file requests via [**BugCrowd**](https://bugcrowd.com/agilebits).

For information about security practices, please visit our [Security homepage](https://bugcrowd.com/agilebits).
@@ -11,16 +11,21 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/1Password/onepassword-operator/pkg/controller"
|
||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/controller"
|
||||
op "github.com/1Password/onepassword-operator/operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/onepassword/message"
|
||||
"github.com/suborbital/grav/discovery/local"
|
||||
"github.com/suborbital/grav/grav"
|
||||
"github.com/suborbital/grav/transport/websocket"
|
||||
"github.com/suborbital/vektor/vlog"
|
||||
|
||||
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
|
||||
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
"github.com/1Password/onepassword-operator/pkg/apis"
|
||||
"github.com/1Password/onepassword-operator/version"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/apis"
|
||||
"github.com/1Password/onepassword-operator/operator/version"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/connect"
|
||||
|
||||
@@ -40,6 +45,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
)
|
||||
|
||||
const envHostVariable = "OP_CONNECT_HOST"
|
||||
const envPollingIntervalVariable = "POLLING_INTERVAL"
|
||||
const manageConnect = "MANAGE_CONNECT"
|
||||
const restartDeploymentsEnvVariable = "AUTO_RESTART"
|
||||
@@ -83,9 +89,11 @@ func main() {
|
||||
|
||||
printVersion()
|
||||
|
||||
namespace, err := k8sutil.GetWatchNamespace()
|
||||
namespace := os.Getenv(k8sutil.WatchNamespaceEnvVar)
|
||||
|
||||
deploymentNamespace, err := k8sutil.GetOperatorNamespace()
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to get watch namespace")
|
||||
log.Error(err, "Failed to get namespace")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -139,7 +147,7 @@ func main() {
|
||||
go func() {
|
||||
connectStarted := false
|
||||
for connectStarted == false {
|
||||
err := op.SetupConnect(mgr.GetClient())
|
||||
err := op.SetupConnect(mgr.GetClient(), deploymentNamespace)
|
||||
// Cache Not Started is an acceptable error. Retry until cache is started.
|
||||
if err != nil && !errors.Is(err, &cache.ErrCacheNotStarted{}) {
|
||||
log.Error(err, "")
|
||||
@@ -165,21 +173,28 @@ func main() {
|
||||
// Add the Metrics Service
|
||||
addMetrics(ctx, cfg)
|
||||
|
||||
// Setup update secrets task
|
||||
updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
|
||||
_, connectSet := os.LookupEnv(envHostVariable)
|
||||
done := make(chan bool)
|
||||
ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
updatedSecretsPoller.UpdateKubernetesSecretsTask()
|
||||
updateSecretsHandler := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
|
||||
// Setup update secrets task
|
||||
if connectSet {
|
||||
consumeConnectEvents(*updateSecretsHandler)
|
||||
} else {
|
||||
// If not using connect then we will use polling to get secret updates
|
||||
// TODO implement 1Password events-api
|
||||
ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
updateSecretsHandler.UpdateKubernetesSecretsTask("", "")
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
|
||||
// Start the Cmd
|
||||
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
|
||||
@@ -298,3 +313,43 @@ func shouldAutoRestartDeployments() bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func consumeConnectEvents(updateSecretsHandler op.SecretUpdateHandler) {
	log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
	logger := vlog.Default(vlog.Level(vlog.LogLevelDebug))
	gwss := websocket.New()
	locald := local.New()

	// Default bus port; OP_BUS_PORT overrides it when set to a valid integer.
	port := "42829"
	if p := os.Getenv("OP_BUS_PORT"); p != "" {
		if _, err := strconv.Atoi(p); err == nil {
			port = p
		}
	}
|
||||
|
||||
g := grav.New(
|
||||
grav.UseLogger(logger),
|
||||
grav.UseEndpoint(port, "http://onepassword-connect/meta/message"),
|
||||
grav.UseTransport(gwss),
|
||||
grav.UseDiscovery(locald),
|
||||
)
|
||||
|
||||
pod := g.Connect()
|
||||
pod.OnType(message.TypeItemUpdate, ItemUpdate(updateSecretsHandler))
|
||||
}
|
||||
|
||||
// ItemUpdate returns a Grav message handler for item.update events. When an
// event is received, the operator re-runs the Kubernetes secrets update task.
|
||||
func ItemUpdate(updateSecretsHandler op.SecretUpdateHandler) grav.MsgFunc {
|
||||
return func(msg grav.Message) error {
|
||||
e := message.ItemUpdateEvent{}
|
||||
if err := msg.UnmarshalData(&e); err != nil {
|
||||
log.Error(err, "failed to UnmarshalData into Event")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info(fmt.Sprintf("Detected update for item %s at vault %s", e.ItemId, e.VaultId))
|
||||
updateSecretsHandler.UpdateKubernetesSecretsTask("", "")
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
@@ -2,7 +2,6 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: onepassword-connect
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
@@ -2,7 +2,6 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: onepassword-connect
|
||||
namespace: default
|
||||
spec:
|
||||
type: NodePort
|
||||
selector:
|
@@ -1,7 +1,7 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
v1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
|
||||
v1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
)
|
||||
|
||||
func init() {
|
@@ -1,7 +1,7 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/1Password/onepassword-operator/pkg/controller/deployment"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/controller/deployment"
|
||||
)
|
||||
|
||||
func init() {
|
@@ -1,7 +1,7 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/1Password/onepassword-operator/pkg/controller/onepassworditem"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/controller/onepassworditem"
|
||||
)
|
||||
|
||||
func init() {
|
@@ -3,10 +3,12 @@ package deployment
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||
kubeSecrets "github.com/1Password/onepassword-operator/operator/pkg/kubernetessecrets"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/onepassword"
|
||||
op "github.com/1Password/onepassword-operator/operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/utils"
|
||||
|
||||
"regexp"
|
||||
|
||||
@@ -114,7 +116,7 @@ func (r *ReconcileDeployment) Reconcile(request reconcile.Request) (reconcile.Re
|
||||
}
|
||||
}
|
||||
// Handles creation or updating secrets for deployment if needed
|
||||
if err := r.HandleApplyingDeployment(deployment.Namespace, annotations, request); err != nil {
|
||||
if err := r.HandleApplyingDeployment(deployment, annotations, request); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
@@ -187,10 +189,19 @@ func (r *ReconcileDeployment) removeOnePasswordFinalizerFromDeployment(deploymen
|
||||
return r.kubeClient.Update(context.Background(), deployment)
|
||||
}
|
||||
|
||||
func (r *ReconcileDeployment) HandleApplyingDeployment(namespace string, annotations map[string]string, request reconcile.Request) error {
|
||||
func (r *ReconcileDeployment) HandleApplyingDeployment(deployment *appsv1.Deployment, annotations map[string]string, request reconcile.Request) error {
|
||||
reqLog := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
||||
namespace := deployment.Namespace
|
||||
|
||||
// check if deployment is marked to be injected with secrets via the webhook
|
||||
injectedContainers, injected := annotations[op.ContainerInjectAnnotation]
|
||||
if injected {
|
||||
parsedInjectedContainers := strings.Split(injectedContainers, ",")
|
||||
return onepassword.CreateOnePasswordItemResourceFromDeployment(r.opConnectClient, r.kubeClient, deployment, parsedInjectedContainers)
|
||||
}
|
||||
|
||||
secretName := annotations[op.NameAnnotation]
|
||||
secretLabels := map[string]string(nil)
|
||||
if len(secretName) == 0 {
|
||||
reqLog.Info("No 'item-name' annotation set. 'item-path' and 'item-name' must be set as annotations to add new secret.")
|
||||
return nil
|
||||
@@ -201,5 +212,5 @@ func (r *ReconcileDeployment) HandleApplyingDeployment(namespace string, annotat
|
||||
return fmt.Errorf("Failed to retrieve item: %v", err)
|
||||
}
|
||||
|
||||
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation])
|
||||
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation], secretLabels, annotations)
|
||||
}
|
@@ -6,8 +6,8 @@ import (
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/1Password/onepassword-operator/pkg/mocks"
|
||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/mocks"
|
||||
op "github.com/1Password/onepassword-operator/operator/pkg/onepassword"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/onepassword"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -258,7 +258,7 @@ var tests = []testReconcileItem{
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "Test Do not update if OnePassword Item Version has not changed",
|
||||
testName: "Test Do not update if Annotations have not changed",
|
||||
deploymentResource: &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: deploymentKind,
|
||||
@@ -271,6 +271,7 @@ var tests = []testReconcileItem{
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
op.NameAnnotation: name,
|
||||
},
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
},
|
||||
existingSecret: &corev1.Secret{
|
||||
@@ -279,6 +280,8 @@ var tests = []testReconcileItem{
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
op.NameAnnotation: name,
|
||||
},
|
||||
},
|
||||
Data: expectedSecretData,
|
||||
@@ -290,7 +293,10 @@ var tests = []testReconcileItem{
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
op.NameAnnotation: name,
|
||||
},
|
||||
Labels: map[string]string(nil),
|
||||
},
|
||||
Data: expectedSecretData,
|
||||
},
|
@@ -4,11 +4,11 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
|
||||
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
||||
"github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
kubeSecrets "github.com/1Password/onepassword-operator/operator/pkg/kubernetessecrets"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/onepassword"
|
||||
op "github.com/1Password/onepassword-operator/operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/utils"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/connect"
|
||||
|
||||
@@ -144,12 +144,21 @@ func (r *ReconcileOnePasswordItem) removeOnePasswordFinalizerFromOnePasswordItem
|
||||
|
||||
func (r *ReconcileOnePasswordItem) HandleOnePasswordItem(resource *onepasswordv1.OnePasswordItem, request reconcile.Request) error {
|
||||
secretName := resource.GetName()
|
||||
autoRestart := resource.Annotations[op.RestartDeploymentsAnnotation]
|
||||
labels := resource.Labels
|
||||
annotations := resource.Annotations
|
||||
autoRestart := annotations[op.RestartDeploymentsAnnotation]
|
||||
|
||||
// do not create a kubernetes secret if the OnePasswordItem was generated
// because the secret is injected into the container via the webhook
|
||||
_, injectedSecret := annotations[op.InjectedAnnotation]
|
||||
if injectedSecret {
|
||||
return nil
|
||||
}
|
||||
|
||||
item, err := onepassword.GetOnePasswordItemByPath(r.opConnectClient, resource.Spec.ItemPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to retrieve item: %v", err)
|
||||
}
|
||||
|
||||
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, resource.Namespace, item, autoRestart)
|
||||
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, resource.Namespace, item, autoRestart, labels, annotations)
|
||||
}
|
@@ -5,10 +5,10 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/1Password/onepassword-operator/pkg/mocks"
|
||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/mocks"
|
||||
op "github.com/1Password/onepassword-operator/operator/pkg/onepassword"
|
||||
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/onepassword"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -31,6 +31,9 @@ const (
|
||||
itemId = "nwrhuano7bcwddcviubpp4mhfq"
|
||||
username = "test-user"
|
||||
password = "QmHumKc$mUeEem7caHtbaBaJ"
|
||||
firstHost = "http://localhost:8080"
|
||||
awsKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
|
||||
iceCream = "freezing blue 20%"
|
||||
userKey = "username"
|
||||
passKey = "password"
|
||||
version = 123
|
||||
@@ -116,7 +119,8 @@ var tests = []testReconcileItem{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
},
|
||||
},
|
||||
Data: expectedSecretData,
|
||||
@@ -127,7 +131,8 @@ var tests = []testReconcileItem{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
},
|
||||
},
|
||||
Data: expectedSecretData,
|
||||
@@ -147,6 +152,11 @@ var tests = []testReconcileItem{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
},
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: itemPath,
|
||||
@@ -157,8 +167,10 @@ var tests = []testReconcileItem{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: "456",
|
||||
op.VersionAnnotation: "456",
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
},
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
Data: expectedSecretData,
|
||||
},
|
||||
@@ -168,8 +180,10 @@ var tests = []testReconcileItem{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
op.ItemPathAnnotation: itemPath,
|
||||
},
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
Data: expectedSecretData,
|
||||
},
|
||||
@@ -210,6 +224,120 @@ var tests = []testReconcileItem{
|
||||
passKey: password,
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "Secret from 1Password item with invalid K8s labels",
|
||||
customResource: &onepasswordv1.OnePasswordItem{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: onePasswordItemKind,
|
||||
APIVersion: onePasswordItemAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "!my sECReT it3m%",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: itemPath,
|
||||
},
|
||||
},
|
||||
existingSecret: nil,
|
||||
expectedError: nil,
|
||||
expectedResultSecret: &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-secret-it3m",
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
},
|
||||
},
|
||||
Data: expectedSecretData,
|
||||
},
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "Secret from 1Password item with fields and sections that have invalid K8s labels",
|
||||
customResource: &onepasswordv1.OnePasswordItem{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: onePasswordItemKind,
|
||||
APIVersion: onePasswordItemAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "!my sECReT it3m%",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: itemPath,
|
||||
},
|
||||
},
|
||||
existingSecret: nil,
|
||||
expectedError: nil,
|
||||
expectedResultSecret: &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-secret-it3m",
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"password": []byte(password),
|
||||
"username": []byte(username),
|
||||
"first-host": []byte(firstHost),
|
||||
"AWS-Access-Key": []byte(awsKey),
|
||||
"ice-cream-type": []byte(iceCream),
|
||||
},
|
||||
},
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
"first host": firstHost,
|
||||
"AWS Access Key": awsKey,
|
||||
"😄 ice-cream type": iceCream,
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "Secret from 1Password item with `-`, `_` and `.`",
|
||||
customResource: &onepasswordv1.OnePasswordItem{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: onePasswordItemKind,
|
||||
APIVersion: onePasswordItemAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "!.my_sECReT.it3m%-_",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: itemPath,
|
||||
},
|
||||
},
|
||||
existingSecret: nil,
|
||||
expectedError: nil,
|
||||
expectedResultSecret: &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-secret.it3m",
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
op.VersionAnnotation: fmt.Sprint(version),
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"password": []byte(password),
|
||||
"username": []byte(username),
|
||||
"first-host": []byte(firstHost),
|
||||
"AWS-Access-Key": []byte(awsKey),
|
||||
"-_ice_cream.type.": []byte(iceCream),
|
||||
},
|
||||
},
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
"first host": firstHost,
|
||||
"AWS Access Key": awsKey,
|
||||
"😄 -_ice_cream.type.": iceCream,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestReconcileOnePasswordItem(t *testing.T) {
|
||||
@@ -241,7 +369,10 @@ func TestReconcileOnePasswordItem(t *testing.T) {
|
||||
mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
||||
|
||||
item := onepassword.Item{}
|
||||
item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
|
||||
item.Fields = []*onepassword.ItemField{}
|
||||
for k, v := range testData.opItem {
|
||||
item.Fields = append(item.Fields, &onepassword.ItemField{Label: k, Value: v})
|
||||
}
|
||||
item.Version = version
|
||||
item.Vault.ID = vaultUUID
|
||||
item.ID = uuid
|
||||
@@ -257,8 +388,8 @@ func TestReconcileOnePasswordItem(t *testing.T) {
|
||||
// watched resource .
|
||||
req := reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Name: testData.customResource.ObjectMeta.Name,
|
||||
Namespace: testData.customResource.ObjectMeta.Namespace,
|
||||
},
|
||||
}
|
||||
_, err := r.Reconcile(req)
|
@@ -4,12 +4,18 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"regexp"
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/onepassword"
|
||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/utils"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubeValidate "k8s.io/apimachinery/pkg/util/validation"
|
||||
|
||||
kubernetesClient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
@@ -23,22 +29,27 @@ const RestartDeploymentsAnnotation = OnepasswordPrefix + "/auto-restart"
|
||||
|
||||
var log = logf.Log
|
||||
|
||||
func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string) error {
|
||||
func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string, labels map[string]string, secretAnnotations map[string]string) error {
|
||||
|
||||
itemVersion := fmt.Sprint(item.Version)
|
||||
annotations := map[string]string{
|
||||
VersionAnnotation: itemVersion,
|
||||
ItemPathAnnotation: fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID),
|
||||
|
||||
// If secretAnnotations is nil we create an empty map so we can later assign values for the OP Annotations in the map
|
||||
if secretAnnotations == nil {
|
||||
secretAnnotations = map[string]string{}
|
||||
}
|
||||
|
||||
secretAnnotations[VersionAnnotation] = itemVersion
|
||||
secretAnnotations[ItemPathAnnotation] = fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID)
|
||||
|
||||
if autoRestart != "" {
|
||||
_, err := utils.StringToBool(autoRestart)
|
||||
if err != nil {
|
||||
log.Error(err, "Error parsing %v annotation on Secret %v. Must be true or false. Defaulting to false.", RestartDeploymentsAnnotation, secretName)
|
||||
return err
|
||||
}
|
||||
annotations[RestartDeploymentsAnnotation] = autoRestart
|
||||
secretAnnotations[RestartDeploymentsAnnotation] = autoRestart
|
||||
}
|
||||
secret := BuildKubernetesSecretFromOnePasswordItem(secretName, namespace, annotations, *item)
|
||||
secret := BuildKubernetesSecretFromOnePasswordItem(secretName, namespace, secretAnnotations, labels, *item)
|
||||
|
||||
currentSecret := &corev1.Secret{}
|
||||
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, currentSecret)
|
||||
@@ -49,9 +60,10 @@ func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretNa
|
||||
return err
|
||||
}
|
||||
|
||||
if currentSecret.Annotations[VersionAnnotation] != itemVersion {
|
||||
if !reflect.DeepEqual(currentSecret.Annotations, secretAnnotations) || !reflect.DeepEqual(currentSecret.Labels, labels) {
|
||||
log.Info(fmt.Sprintf("Updating Secret %v at namespace '%v'", secret.Name, secret.Namespace))
|
||||
currentSecret.ObjectMeta.Annotations = annotations
|
||||
currentSecret.ObjectMeta.Annotations = secretAnnotations
|
||||
currentSecret.ObjectMeta.Labels = labels
|
||||
currentSecret.Data = secret.Data
|
||||
return kubeClient.Update(context.Background(), currentSecret)
|
||||
}
|
||||
@@ -60,12 +72,13 @@ func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretNa
|
||||
return nil
|
||||
}
|
||||
|
||||
func BuildKubernetesSecretFromOnePasswordItem(name, namespace string, annotations map[string]string, item onepassword.Item) *corev1.Secret {
|
||||
func BuildKubernetesSecretFromOnePasswordItem(name, namespace string, annotations map[string]string, labels map[string]string, item onepassword.Item) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Name: utils.FormatSecretName(name),
|
||||
Namespace: namespace,
|
||||
Annotations: annotations,
|
||||
Labels: labels,
|
||||
},
|
||||
Data: BuildKubernetesSecretData(item.Fields),
|
||||
}
|
||||
@@ -75,8 +88,34 @@ func BuildKubernetesSecretData(fields []*onepassword.ItemField) map[string][]byt
|
||||
secretData := map[string][]byte{}
|
||||
for i := 0; i < len(fields); i++ {
|
||||
if fields[i].Value != "" {
|
||||
secretData[fields[i].Label] = []byte(fields[i].Value)
|
||||
key := formatSecretDataName(fields[i].Label)
|
||||
secretData[key] = []byte(fields[i].Value)
|
||||
}
|
||||
}
|
||||
return secretData
|
||||
}
|
||||
|
||||
// formatSecretDataName rewrites a value to be a valid Secret data key.
|
||||
//
|
||||
// The Secret data keys must consist of alphanumeric numbers, `-`, `_` or `.`
|
||||
// (https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets)
|
||||
func formatSecretDataName(value string) string {
|
||||
if errs := kubeValidate.IsConfigMapKey(value); len(errs) == 0 {
|
||||
return value
|
||||
}
|
||||
return createValidSecretDataName(value)
|
||||
}
|
||||
|
||||
var invalidDataChars = regexp.MustCompile("[^a-zA-Z0-9-._]+")
|
||||
var invalidStartEndChars = regexp.MustCompile("(^[^a-zA-Z0-9-._]+|[^a-zA-Z0-9-._]+$)")
|
||||
|
||||
func createValidSecretDataName(value string) string {
|
||||
result := invalidStartEndChars.ReplaceAllString(value, "")
|
||||
result = invalidDataChars.ReplaceAllString(result, "-")
|
||||
|
||||
if len(result) > kubeValidate.DNS1123SubdomainMaxLength {
|
||||
result = result[0:kubeValidate.DNS1123SubdomainMaxLength]
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/1Password/connect-sdk-go/onepassword"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubeValidate "k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
@@ -30,7 +31,11 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||
|
||||
kubeClient := fake.NewFakeClient()
|
||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation)
|
||||
secretLabels := map[string]string{}
|
||||
secretAnnotations := map[string]string{
|
||||
"testAnnotation": "exists",
|
||||
}
|
||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretAnnotations)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
@@ -42,6 +47,10 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||
}
|
||||
compareFields(item.Fields, createdSecret.Data, t)
|
||||
compareAnnotationsToItem(createdSecret.Annotations, item, t)
|
||||
|
||||
if createdSecret.Annotations["testAnnotation"] != "exists" {
|
||||
t.Errorf("Expected testAnnotation to be merged with existing annotations, but wasn't.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||
@@ -55,7 +64,9 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||
|
||||
kubeClient := fake.NewFakeClient()
|
||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation)
|
||||
secretLabels := map[string]string{}
|
||||
secretAnnotations := map[string]string{}
|
||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretAnnotations)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
@@ -66,7 +77,7 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||
newItem.Version = 456
|
||||
newItem.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||
newItem.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||
err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation)
|
||||
err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation, secretLabels, secretAnnotations)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
@@ -99,9 +110,10 @@ func TestBuildKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||
}
|
||||
item := onepassword.Item{}
|
||||
item.Fields = generateFields(5)
|
||||
labels := map[string]string{}
|
||||
|
||||
kubeSecret := BuildKubernetesSecretFromOnePasswordItem(name, namespace, annotations, item)
|
||||
if kubeSecret.Name != name {
|
||||
kubeSecret := BuildKubernetesSecretFromOnePasswordItem(name, namespace, annotations, labels, item)
|
||||
if kubeSecret.Name != strings.ToLower(name) {
|
||||
t.Errorf("Expected name value: %v but got: %v", name, kubeSecret.Name)
|
||||
}
|
||||
if kubeSecret.Namespace != namespace {
|
||||
@@ -113,6 +125,45 @@ func TestBuildKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||
compareFields(item.Fields, kubeSecret.Data, t)
|
||||
}
|
||||
|
||||
func TestBuildKubernetesSecretFixesInvalidLabels(t *testing.T) {
|
||||
name := "inV@l1d k8s secret%name"
|
||||
expectedName := "inv-l1d-k8s-secret-name"
|
||||
namespace := "someNamespace"
|
||||
annotations := map[string]string{
|
||||
"annotationKey": "annotationValue",
|
||||
}
|
||||
labels := map[string]string{}
|
||||
item := onepassword.Item{}
|
||||
|
||||
item.Fields = []*onepassword.ItemField{
|
||||
{
|
||||
Label: "label w%th invalid ch!rs-",
|
||||
Value: "value1",
|
||||
},
|
||||
{
|
||||
Label: strings.Repeat("x", kubeValidate.DNS1123SubdomainMaxLength+1),
|
||||
Value: "name exceeds max length",
|
||||
},
|
||||
}
|
||||
|
||||
kubeSecret := BuildKubernetesSecretFromOnePasswordItem(name, namespace, annotations, labels, item)
|
||||
|
||||
// Assert Secret's meta.name was fixed
|
||||
if kubeSecret.Name != expectedName {
|
||||
t.Errorf("Expected name value: %v but got: %v", name, kubeSecret.Name)
|
||||
}
|
||||
if kubeSecret.Namespace != namespace {
|
||||
t.Errorf("Expected namespace value: %v but got: %v", namespace, kubeSecret.Namespace)
|
||||
}
|
||||
|
||||
// assert labels were fixed for each data key
|
||||
for key := range kubeSecret.Data {
|
||||
if !validLabel(key) {
|
||||
t.Errorf("Expected valid kubernetes label, got %s", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func compareAnnotationsToItem(annotations map[string]string, item onepassword.Item, t *testing.T) {
|
||||
actualVaultId, actualItemId, err := ParseVaultIdAndItemIdFromPath(annotations[ItemPathAnnotation])
|
||||
if err != nil {
|
||||
@@ -164,3 +215,10 @@ func ParseVaultIdAndItemIdFromPath(path string) (string, string, error) {
|
||||
}
|
||||
return "", "", fmt.Errorf("%q is not an acceptable path for One Password item. Must be of the format: `vaults/{vault_id}/items/{item_id}`", path)
|
||||
}
|
||||
|
||||
func validLabel(v string) bool {
|
||||
if err := kubeValidate.IsConfigMapKey(v); len(err) > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
@@ -14,6 +14,8 @@ const (
|
||||
VersionAnnotation = OnepasswordPrefix + "/item-version"
|
||||
RestartAnnotation = OnepasswordPrefix + "/last-restarted"
|
||||
RestartDeploymentsAnnotation = OnepasswordPrefix + "/auto-restart"
|
||||
ContainerInjectAnnotation = OnepasswordPrefix + "/inject"
|
||||
InjectedAnnotation = OnepasswordPrefix + "/injected"
|
||||
)
|
||||
|
||||
func GetAnnotationsForDeployment(deployment *appsv1.Deployment, regex *regexp.Regexp) (map[string]string, bool) {
|
@@ -2,6 +2,7 @@ package onepassword
|
||||
|
||||
import (
|
||||
"context"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"os"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
@@ -17,13 +18,13 @@ var logConnectSetup = logf.Log.WithName("ConnectSetup")
|
||||
var deploymentPath = "deploy/connect/deployment.yaml"
|
||||
var servicePath = "deploy/connect/service.yaml"
|
||||
|
||||
func SetupConnect(kubeClient client.Client) error {
|
||||
err := setupService(kubeClient, servicePath)
|
||||
func SetupConnect(kubeClient client.Client, deploymentNamespace string) error {
|
||||
err := setupService(kubeClient, servicePath, deploymentNamespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = setupDeployment(kubeClient, deploymentPath)
|
||||
err = setupDeployment(kubeClient, deploymentPath, deploymentNamespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -31,22 +32,22 @@ func SetupConnect(kubeClient client.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupDeployment(kubeClient client.Client, deploymentPath string) error {
|
||||
func setupDeployment(kubeClient client.Client, deploymentPath string, deploymentNamespace string) error {
|
||||
existingDeployment := &appsv1.Deployment{}
|
||||
|
||||
// check if deployment has already been created
|
||||
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: "default"}, existingDeployment)
|
||||
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: deploymentNamespace}, existingDeployment)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
logConnectSetup.Info("No existing Connect deployment found. Creating Deployment")
|
||||
return createDeployment(kubeClient, deploymentPath)
|
||||
return createDeployment(kubeClient, deploymentPath, deploymentNamespace)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func createDeployment(kubeClient client.Client, deploymentPath string) error {
|
||||
deployment, err := getDeploymentToCreate(deploymentPath)
|
||||
func createDeployment(kubeClient client.Client, deploymentPath string, deploymentNamespace string) error {
|
||||
deployment, err := getDeploymentToCreate(deploymentPath, deploymentNamespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -59,12 +60,16 @@ func createDeployment(kubeClient client.Client, deploymentPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDeploymentToCreate(deploymentPath string) (*appsv1.Deployment, error) {
|
||||
func getDeploymentToCreate(deploymentPath string, deploymentNamespace string) (*appsv1.Deployment, error) {
|
||||
f, err := os.Open(deploymentPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deployment := &appsv1.Deployment{}
|
||||
deployment := &appsv1.Deployment{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: deploymentNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
err = yaml.NewYAMLOrJSONDecoder(f, 4096).Decode(deployment)
|
||||
if err != nil {
|
||||
@@ -73,26 +78,30 @@ func getDeploymentToCreate(deploymentPath string) (*appsv1.Deployment, error) {
|
||||
return deployment, nil
|
||||
}
|
||||
|
||||
func setupService(kubeClient client.Client, servicePath string) error {
|
||||
func setupService(kubeClient client.Client, servicePath string, deploymentNamespace string) error {
|
||||
existingService := &corev1.Service{}
|
||||
|
||||
//check if service has already been created
|
||||
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: "default"}, existingService)
|
||||
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: deploymentNamespace}, existingService)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
logConnectSetup.Info("No existing Connect service found. Creating Service")
|
||||
return createService(kubeClient, servicePath)
|
||||
return createService(kubeClient, servicePath, deploymentNamespace)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func createService(kubeClient client.Client, servicePath string) error {
|
||||
func createService(kubeClient client.Client, servicePath string, deploymentNamespace string) error {
|
||||
f, err := os.Open(servicePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
service := &corev1.Service{}
|
||||
service := &corev1.Service{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: deploymentNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
err = yaml.NewYAMLOrJSONDecoder(f, 4096).Decode(service)
|
||||
if err != nil {
|
@@ -25,7 +25,7 @@ func TestServiceSetup(t *testing.T) {
|
||||
// Create a fake client to mock API calls.
|
||||
client := fake.NewFakeClientWithScheme(s, objs...)
|
||||
|
||||
err := setupService(client, "../../deploy/connect/service.yaml")
|
||||
err := setupService(client, "../../deploy/connect/service.yaml", defaultNamespacedName.Namespace)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Error Setting Up Connect: %v", err)
|
||||
@@ -50,7 +50,7 @@ func TestDeploymentSetup(t *testing.T) {
|
||||
// Create a fake client to mock API calls.
|
||||
client := fake.NewFakeClientWithScheme(s, objs...)
|
||||
|
||||
err := setupDeployment(client, "../../deploy/connect/deployment.yaml")
|
||||
err := setupDeployment(client, "../../deploy/connect/deployment.yaml", defaultNamespacedName.Namespace)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Error Setting Up Connect: %v", err)
|
@@ -1,6 +1,10 @@
|
||||
package onepassword
|
||||
|
||||
import corev1 "k8s.io/api/core/v1"
|
||||
import (
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/utils"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func AreContainersUsingSecrets(containers []corev1.Container, secrets map[string]*corev1.Secret) bool {
|
||||
for i := 0; i < len(containers); i++ {
|
||||
@@ -31,3 +35,29 @@ func AppendUpdatedContainerSecrets(containers []corev1.Container, secrets map[st
|
||||
}
|
||||
return updatedDeploymentSecrets
|
||||
}
|
||||
|
||||
func AreContainersUsingInjectedSecrets(containers []corev1.Container, injectedContainers []string, items map[string]*onepasswordv1.OnePasswordItem) bool {
	for _, container := range containers {
		// only inspect containers that were marked to be injected with secrets
		markedForInjection := false
		for _, injectedContainer := range injectedContainers {
			if injectedContainer == container.Name {
				markedForInjection = true
				break
			}
		}
		if !markedForInjection {
			continue
		}

		// check if any environment variables reference an updated injected secret
		for _, envVariable := range container.Env {
			referenceVault, referenceItem, err := ParseReference(envVariable.Value)
			if err != nil {
				continue
			}
			if _, itemFound := items[utils.BuildInjectedOnePasswordItemName(referenceVault, referenceItem)]; itemFound {
				return true
			}
		}
	}
	return false
}
|
@@ -1,6 +1,9 @@
|
||||
package onepassword
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
@@ -24,3 +27,14 @@ func GetUpdatedSecretsForDeployment(deployment *appsv1.Deployment, secrets map[s
|
||||
|
||||
return updatedSecretsForDeployment
|
||||
}
|
||||
|
||||
func IsDeploymentUsingInjectedSecrets(deployment *appsv1.Deployment, items map[string]*onepasswordv1.OnePasswordItem) bool {
|
||||
containers := deployment.Spec.Template.Spec.Containers
|
||||
containers = append(containers, deployment.Spec.Template.Spec.InitContainers...)
|
||||
injectedContainers, enabled := deployment.Spec.Template.Annotations[ContainerInjectAnnotation]
|
||||
if !enabled {
|
||||
return false
|
||||
}
|
||||
parsedInjectedContainers := strings.Split(injectedContainers, ",")
|
||||
return AreContainersUsingInjectedSecrets(containers, parsedInjectedContainers, items)
|
||||
}
|
@@ -11,6 +11,16 @@ import (
|
||||
|
||||
var logger = logf.Log.WithName("retrieve_item")
|
||||
|
||||
const secretReferencePrefix = "op://"
|
||||
|
||||
type InvalidOPFormatError struct {
|
||||
Reference string
|
||||
}
|
||||
|
||||
func (e *InvalidOPFormatError) Error() string {
|
||||
return fmt.Sprintf("Invalid secret reference : %s. Secret references should start with op://", e.Reference)
|
||||
}
|
||||
|
||||
func GetOnePasswordItemByPath(opConnectClient connect.Client, path string) (*onepassword.Item, error) {
|
||||
vaultValue, itemValue, err := ParseVaultAndItemFromPath(path)
|
||||
if err != nil {
|
||||
@@ -33,6 +43,30 @@ func GetOnePasswordItemByPath(opConnectClient connect.Client, path string) (*one
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func ParseReference(reference string) (string, string, error) {
|
||||
if !strings.HasPrefix(reference, secretReferencePrefix) {
|
||||
return "", "", &InvalidOPFormatError{Reference: reference}
|
||||
}
|
||||
path := strings.TrimPrefix(reference, secretReferencePrefix)
|
||||
|
||||
splitPath := strings.Split(path, "/")
|
||||
if len(splitPath) != 3 {
|
||||
return "", "", fmt.Errorf("Invalid secret reference : %s. Secret references should match op://<vault>/<item>/<field>", reference)
|
||||
}
|
||||
|
||||
vault := splitPath[0]
|
||||
if vault == "" {
|
||||
return "", "", fmt.Errorf("Invalid secret reference : %s. Vault can't be empty.", reference)
|
||||
}
|
||||
|
||||
item := splitPath[1]
|
||||
if item == "" {
|
||||
return "", "", fmt.Errorf("Invalid secret reference : %s. Item can't be empty.", reference)
|
||||
}
|
||||
|
||||
return vault, item, nil
|
||||
}
|
||||
|
||||
func ParseVaultAndItemFromPath(path string) (string, string, error) {
|
||||
splitPath := strings.Split(path, "/")
|
||||
if len(splitPath) == 4 && splitPath[0] == "vaults" && splitPath[2] == "items" {
|
27
operator/pkg/onepassword/message/item_update.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package message
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
// TypeItemUpdate and others are sync message types
|
||||
const (
|
||||
TypeItemUpdate = "item.update"
|
||||
)
|
||||
|
||||
// ItemUpdateEvent is the data for a sync status message
|
||||
type ItemUpdateEvent struct {
|
||||
VaultId string `json:"vaultId"`
|
||||
ItemId string `json:"itemId"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// Type returns the sync message type for an item update event
|
||||
func (s *ItemUpdateEvent) Type() string {
|
||||
return TypeItemUpdate
|
||||
}
|
||||
|
||||
// Bytes returns the JSON-encoded event
|
||||
func (s *ItemUpdateEvent) Bytes() []byte {
|
||||
bytes, _ := json.Marshal(s)
|
||||
|
||||
return bytes
|
||||
}
|
95
operator/pkg/onepassword/onepassword_item.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package onepassword
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/connect"
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/utils"
|
||||
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kubernetesClient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func CreateOnePasswordItemResourceFromDeployment(opClient connect.Client, kubeClient kubernetesClient.Client, deployment *appsv1.Deployment, injectedContainers []string) error {
|
||||
containers := deployment.Spec.Template.Spec.Containers
|
||||
containers = append(containers, deployment.Spec.Template.Spec.InitContainers...)
|
||||
for _, container := range containers {
|
||||
// check if this container is listed as one of the containers
|
||||
// set to have injected secrets
|
||||
for _, injectedContainer := range injectedContainers {
|
||||
if injectedContainer != container.Name {
|
||||
continue
|
||||
}
|
||||
// create a one password item custom resource to track updates for injected secrets
|
||||
err := CreateOnePasswordCRSecretsFromContainer(opClient, kubeClient, container, deployment.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CreateOnePasswordCRSecretsFromContainer(opClient connect.Client, kubeClient kubernetesClient.Client, container corev1.Container, namespace string) error {
|
||||
for _, env := range container.Env {
|
||||
// if the value is not in the format op://<vault>/<item>/<field>, ignore it
|
||||
vault, item, err := ParseReference(env.Value)
|
||||
if err != nil {
|
||||
var ev *InvalidOPFormatError
|
||||
if !errors.As(err, &ev) {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// create a one password item custom resource to track updates for injected secrets
|
||||
err = CreateOnePasswordCRSecretFromReference(opClient, kubeClient, vault, item, namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CreateOnePasswordCRSecretFromReference(opClient connect.Client, kubeClient kubernetesClient.Client, vault, item, namespace string) error {
|
||||
|
||||
retrievedItem, err := GetOnePasswordItemByPath(opClient, fmt.Sprintf("vaults/%s/items/%s", vault, item))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to retrieve item: %v", err)
|
||||
}
|
||||
|
||||
name := utils.BuildInjectedOnePasswordItemName(vault, item)
|
||||
onepassworditem := BuildOnePasswordItemCRFromPath(vault, item, name, namespace, fmt.Sprint(retrievedItem.Version))
|
||||
|
||||
currentOnepassworditem := &onepasswordv1.OnePasswordItem{}
|
||||
err = kubeClient.Get(context.Background(), types.NamespacedName{Name: onepassworditem.Name, Namespace: onepassworditem.Namespace}, currentOnepassworditem)
|
||||
if k8sErrors.IsNotFound(err) {
|
||||
log.Info(fmt.Sprintf("Creating OnePasswordItem CR %v at namespace '%v'", onepassworditem.Name, onepassworditem.Namespace))
|
||||
return kubeClient.Create(context.Background(), onepassworditem)
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func BuildOnePasswordItemCRFromPath(vault, item, name, namespace, version string) *onepasswordv1.OnePasswordItem {
|
||||
return &onepasswordv1.OnePasswordItem{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
InjectedAnnotation: "true",
|
||||
VersionAnnotation: version,
|
||||
},
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: fmt.Sprintf("vaults/%s/items/%s", vault, item),
|
||||
},
|
||||
}
|
||||
}
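For orientation, a hypothetical sketch (not part of this change) of the CR that BuildOnePasswordItemCRFromPath produces for an injected reference; the name follows utils.BuildInjectedOnePasswordItemName, i.e. `injectedsecret-<vault>-<item>`:

```go
package onepassword

import "fmt"

// Hypothetical sketch: the shape of the OnePasswordItem CR built for an
// injected op://<vault>/<item>/<field> reference.
func exampleBuiltOnePasswordItemCR() {
	cr := BuildOnePasswordItemCRFromPath(
		"hfnjvi6aymbsnfc2xeeoheizda", // vault ID parsed from the reference
		"nwrhuano7bcwddcviubpp4mhfq", // item ID parsed from the reference
		"injectedsecret-hfnjvi6aymbsnfc2xeeoheizda-nwrhuano7bcwddcviubpp4mhfq",
		"default",
		"2",
	)
	fmt.Println(cr.Name)                            // injectedsecret-hfnjvi6aymbsnfc2xeeoheizda-nwrhuano7bcwddcviubpp4mhfq
	fmt.Println(cr.Spec.ItemPath)                   // vaults/hfnjvi6aymbsnfc2xeeoheizda/items/nwrhuano7bcwddcviubpp4mhfq
	fmt.Println(cr.Annotations[InjectedAnnotation]) // true
	fmt.Println(cr.Annotations[VersionAnnotation])  // 2
}
```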
|
234
operator/pkg/onepassword/onepassword_item_test.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package onepassword
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/mocks"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/onepassword"
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
errors2 "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
type onepassworditemInjections struct {
|
||||
testName string
|
||||
existingDeployment *appsv1.Deployment
|
||||
existingNamespace *corev1.Namespace
|
||||
expectedError error
|
||||
expectedEvents []string
|
||||
opItem map[string]string
|
||||
expectedOPItem *onepasswordv1.OnePasswordItem
|
||||
}
|
||||
|
||||
var onepassworditemTests = []onepassworditemInjections{
|
||||
{
|
||||
testName: "Try to Create OnePasswordItem with container with valid op reference",
|
||||
existingNamespace: defaultNamespace,
|
||||
existingDeployment: &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: deploymentKind,
|
||||
APIVersion: deploymentAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
ContainerInjectAnnotation: "test-app",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "test-app",
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: name,
|
||||
Value: fmt.Sprintf("op://%s/%s/test", vaultId, itemId),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
},
|
||||
expectedOPItem: &onepasswordv1.OnePasswordItem{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "OnePasswordItem",
|
||||
APIVersion: "onepassword.com/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: injectedOnePasswordItemName,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
InjectedAnnotation: "true",
|
||||
VersionAnnotation: "old",
|
||||
},
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: itemPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "Container with no op:// reference does not create OnePasswordItem",
|
||||
existingNamespace: defaultNamespace,
|
||||
existingDeployment: &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: deploymentKind,
|
||||
APIVersion: deploymentAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
ContainerInjectAnnotation: "test-app",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "test-app",
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: name,
|
||||
Value: fmt.Sprintf("some value"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
},
|
||||
expectedOPItem: nil,
|
||||
},
|
||||
{
|
||||
testName: "Container with op:// reference missing vault and item does not create OnePasswordItem and returns error",
|
||||
existingNamespace: defaultNamespace,
|
||||
existingDeployment: &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: deploymentKind,
|
||||
APIVersion: deploymentAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
ContainerInjectAnnotation: "test-app",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "test-app",
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: name,
|
||||
Value: fmt.Sprintf("op://"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("Invalid secret reference: %s. Secret references should match op://<vault>/<item>/<field>", "op://"),
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
},
|
||||
expectedOPItem: nil,
|
||||
},
|
||||
}
|
||||
|
||||
func TestOnePasswordItemSecretInjected(t *testing.T) {
|
||||
for _, testData := range onepassworditemTests {
|
||||
t.Run(testData.testName, func(t *testing.T) {
|
||||
|
||||
// Register operator types with the runtime scheme.
|
||||
s := scheme.Scheme
|
||||
s.AddKnownTypes(appsv1.SchemeGroupVersion, &onepasswordv1.OnePasswordItem{}, &onepasswordv1.OnePasswordItemList{}, &appsv1.Deployment{})
|
||||
|
||||
// Objects to track in the fake client.
|
||||
objs := []runtime.Object{
|
||||
testData.existingDeployment,
|
||||
testData.existingNamespace,
|
||||
}
|
||||
|
||||
// Create a fake client to mock API calls.
|
||||
cl := fake.NewFakeClientWithScheme(s, objs...)
|
||||
|
||||
opConnectClient := &mocks.TestClient{}
|
||||
mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
||||
|
||||
item := onepassword.Item{}
|
||||
item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
|
||||
item.Version = itemVersion
|
||||
item.Vault.ID = vaultUUID
|
||||
item.ID = uuid
|
||||
return &item, nil
|
||||
}
|
||||
|
||||
injectedContainers := testData.existingDeployment.Spec.Template.ObjectMeta.Annotations[ContainerInjectAnnotation]
|
||||
parsedInjectedContainers := strings.Split(injectedContainers, ",")
|
||||
err := CreateOnePasswordItemResourceFromDeployment(opConnectClient, cl, testData.existingDeployment, parsedInjectedContainers)
|
||||
|
||||
assert.Equal(t, testData.expectedError, err)
|
||||
|
||||
// Check if Secret has been created and has the correct data
|
||||
opItemCR := &onepasswordv1.OnePasswordItem{}
|
||||
err = cl.Get(context.TODO(), types.NamespacedName{Name: injectedOnePasswordItemName, Namespace: namespace}, opItemCR)
|
||||
|
||||
if testData.expectedOPItem == nil {
|
||||
assert.Error(t, err)
|
||||
assert.True(t, errors2.IsNotFound(err))
|
||||
} else {
|
||||
assert.Equal(t, testData.expectedOPItem.Spec.ItemPath, opItemCR.Spec.ItemPath)
|
||||
assert.Equal(t, testData.expectedOPItem.Name, opItemCR.Name)
|
||||
assert.Equal(t, testData.expectedOPItem.Annotations[InjectedAnnotation], opItemCR.Annotations[InjectedAnnotation])
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
@@ -5,8 +5,9 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
kubeSecrets "github.com/1Password/onepassword-operator/operator/pkg/kubernetessecrets"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/utils"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/connect"
|
||||
"github.com/1Password/connect-sdk-go/onepassword"
|
||||
@@ -35,18 +36,23 @@ type SecretUpdateHandler struct {
|
||||
shouldAutoRestartDeploymentsGlobal bool
|
||||
}
|
||||
|
||||
func (h *SecretUpdateHandler) UpdateKubernetesSecretsTask() error {
|
||||
updatedKubernetesSecrets, err := h.updateKubernetesSecrets()
|
||||
func (h *SecretUpdateHandler) UpdateKubernetesSecretsTask(vaultId, itemId string) error {
|
||||
updatedKubernetesSecrets, err := h.updateKubernetesSecrets(vaultId, itemId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return h.restartDeploymentsWithUpdatedSecrets(updatedKubernetesSecrets)
|
||||
updatedInjectedSecrets, err := h.updateInjectedSecrets(vaultId, itemId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return h.restartDeploymentsWithUpdatedSecrets(updatedKubernetesSecrets, updatedInjectedSecrets)
|
||||
}
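A hypothetical sketch (not part of this change) of how the two expected call sites would use the new signature; the message import path is assumed from the file layout in this diff:

```go
package onepassword

import "github.com/1Password/onepassword-operator/operator/pkg/onepassword/message"

// Hypothetical sketch: the periodic poll refreshes everything by passing empty
// IDs, while a webhook-driven item.update message targets a single item.
func runUpdateTask(h *SecretUpdateHandler, evt *message.ItemUpdateEvent) error {
	if evt == nil {
		return h.UpdateKubernetesSecretsTask("", "")
	}
	return h.UpdateKubernetesSecretsTask(evt.VaultId, evt.ItemId)
}
```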
|
||||
|
||||
func (h *SecretUpdateHandler) restartDeploymentsWithUpdatedSecrets(updatedSecretsByNamespace map[string]map[string]*corev1.Secret) error {
|
||||
// No secrets to update. Exit
|
||||
if len(updatedSecretsByNamespace) == 0 || updatedSecretsByNamespace == nil {
|
||||
func (h *SecretUpdateHandler) restartDeploymentsWithUpdatedSecrets(updatedSecretsByNamespace map[string]map[string]*corev1.Secret, updatedInjectedSecretsByNamespace map[string]map[string]*onepasswordv1.OnePasswordItem) error {
|
||||
|
||||
if len(updatedSecretsByNamespace) == 0 && len(updatedInjectedSecretsByNamespace) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -63,6 +69,7 @@ func (h *SecretUpdateHandler) restartDeploymentsWithUpdatedSecrets(updatedSecret
|
||||
|
||||
setForAutoRestartByNamespaceMap, err := h.getIsSetForAutoRestartByNamespaceMap()
|
||||
if err != nil {
|
||||
log.Error(err, "Error determining which namespaces allow restarts")
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -70,17 +77,24 @@ func (h *SecretUpdateHandler) restartDeploymentsWithUpdatedSecrets(updatedSecret
|
||||
deployment := &deployments.Items[i]
|
||||
updatedSecrets := updatedSecretsByNamespace[deployment.Namespace]
|
||||
|
||||
// check if deployment is using one of the updated secrets
|
||||
updatedDeploymentSecrets := GetUpdatedSecretsForDeployment(deployment, updatedSecrets)
|
||||
if len(updatedDeploymentSecrets) == 0 {
|
||||
continue
|
||||
}
|
||||
for _, secret := range updatedDeploymentSecrets {
|
||||
if isSecretSetForAutoRestart(secret, deployment, setForAutoRestartByNamespaceMap) {
|
||||
h.restartDeployment(deployment)
|
||||
continue
|
||||
if len(updatedDeploymentSecrets) != 0 {
|
||||
for _, secret := range updatedDeploymentSecrets {
|
||||
if isSecretSetForAutoRestart(secret, deployment, setForAutoRestartByNamespaceMap) {
|
||||
h.restartDeployment(deployment)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check if the deployment is using one of the updated injected secrets
|
||||
updatedInjection := IsDeploymentUsingInjectedSecrets(deployment, updatedInjectedSecretsByNamespace[deployment.Namespace])
|
||||
if updatedInjection && isDeploymentSetForAutoRestart(deployment, setForAutoRestartByNamespaceMap) {
|
||||
h.restartDeployment(deployment)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Info(fmt.Sprintf("Deployment %q at namespace %q is up to date", deployment.GetName(), deployment.Namespace))
|
||||
|
||||
}
|
||||
@@ -89,16 +103,17 @@ func (h *SecretUpdateHandler) restartDeploymentsWithUpdatedSecrets(updatedSecret
|
||||
|
||||
func (h *SecretUpdateHandler) restartDeployment(deployment *appsv1.Deployment) {
|
||||
log.Info(fmt.Sprintf("Deployment %q at namespace %q references an updated secret. Restarting", deployment.GetName(), deployment.Namespace))
|
||||
deployment.Spec.Template.Annotations = map[string]string{
|
||||
RestartAnnotation: time.Now().String(),
|
||||
if deployment.Spec.Template.Annotations == nil {
|
||||
deployment.Spec.Template.Annotations = map[string]string{}
|
||||
}
|
||||
deployment.Spec.Template.Annotations[RestartAnnotation] = time.Now().String()
|
||||
err := h.client.Update(context.Background(), deployment)
|
||||
if err != nil {
|
||||
log.Error(err, "Problem restarting deployment")
|
||||
}
|
||||
}
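The nil check added above matters because writing into a nil map panics, and replacing the whole map would drop annotations the pod template already carries (such as the container inject annotation). A minimal sketch of the pattern, assuming it lives in the same package:

```go
package onepassword

import "time"

// Hypothetical sketch: set the restart annotation without panicking on a nil
// map and without discarding annotations that are already present.
func setRestartAnnotation(annotations map[string]string) map[string]string {
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[RestartAnnotation] = time.Now().String()
	return annotations
}
```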
|
||||
|
||||
func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*corev1.Secret, error) {
|
||||
func (h *SecretUpdateHandler) updateKubernetesSecrets(vaultId, itemId string) (map[string]map[string]*corev1.Secret, error) {
|
||||
secrets := &corev1.SecretList{}
|
||||
err := h.client.List(context.Background(), secrets)
|
||||
if err != nil {
|
||||
@@ -111,11 +126,16 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
|
||||
secret := secrets.Items[i]
|
||||
|
||||
itemPath := secret.Annotations[ItemPathAnnotation]
|
||||
|
||||
currentVersion := secret.Annotations[VersionAnnotation]
|
||||
if len(itemPath) == 0 || len(currentVersion) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if vaultId != "" && itemId != "" && itemPath != fmt.Sprintf("vaults/%s/items%s", vaultId, itemId) {
|
||||
continue
|
||||
}
|
||||
|
||||
item, err := GetOnePasswordItemByPath(h.opConnectClient, secret.Annotations[ItemPathAnnotation])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to retrieve item: %v", err)
|
||||
@@ -131,7 +151,7 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
|
||||
}
|
||||
log.Info(fmt.Sprintf("Updating kubernetes secret '%v'", secret.GetName()))
|
||||
secret.Annotations[VersionAnnotation] = itemVersion
|
||||
updatedSecret := kubeSecrets.BuildKubernetesSecretFromOnePasswordItem(secret.Name, secret.Namespace, secret.Annotations, *item)
|
||||
updatedSecret := kubeSecrets.BuildKubernetesSecretFromOnePasswordItem(secret.Name, secret.Namespace, secret.Annotations, secret.Labels, *item)
|
||||
h.client.Update(context.Background(), updatedSecret)
|
||||
if updatedSecrets[secret.Namespace] == nil {
|
||||
updatedSecrets[secret.Namespace] = make(map[string]*corev1.Secret)
|
||||
@@ -142,6 +162,55 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
|
||||
return updatedSecrets, nil
|
||||
}
|
||||
|
||||
func (h *SecretUpdateHandler) updateInjectedSecrets(vaultId, itemId string) (map[string]map[string]*onepasswordv1.OnePasswordItem, error) {
|
||||
// fetch all onepassworditems
|
||||
onepasswordItems := &onepasswordv1.OnePasswordItemList{}
|
||||
err := h.client.List(context.Background(), onepasswordItems)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to list OnePasswordItems")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updatedItems := map[string]map[string]*onepasswordv1.OnePasswordItem{}
|
||||
for _, item := range onepasswordItems.Items {
|
||||
|
||||
// if onepassworditem was not generated by injecting a secret into a deployment then ignore
|
||||
_, injected := item.Annotations[InjectedAnnotation]
|
||||
if !injected {
|
||||
continue
|
||||
}
|
||||
|
||||
itemPath := item.Spec.ItemPath
|
||||
currentVersion := item.Annotations[VersionAnnotation]
|
||||
if len(itemPath) == 0 || len(currentVersion) == 0 {
|
||||
continue
|
||||
}
|
||||
if vaultId != "" && itemId != "" && itemPath != fmt.Sprintf("vaults/%s/items%s", vaultId, itemId) {
|
||||
continue
|
||||
}
|
||||
|
||||
storedItem, err := GetOnePasswordItemByPath(h.opConnectClient, itemPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to retrieve item: %v", err)
|
||||
}
|
||||
|
||||
itemVersion := fmt.Sprint(storedItem.Version)
|
||||
if currentVersion != itemVersion {
|
||||
item.Annotations[VersionAnnotation] = itemVersion
|
||||
h.client.Update(context.Background(), &item)
|
||||
if isItemLockedForForcedRestarts(storedItem) {
|
||||
log.Info(fmt.Sprintf("Secret '%v' has been updated in 1Password but is set to be ignored. Updates to an ignored secret will not trigger an update to a OnePasswordItem secret or a rolling restart.", item.Name))
|
||||
continue
|
||||
}
|
||||
if updatedItems[item.Namespace] == nil {
|
||||
updatedItems[item.Namespace] = make(map[string]*onepasswordv1.OnePasswordItem)
|
||||
}
|
||||
updatedItems[item.Namespace][item.Name] = &item
|
||||
}
|
||||
}
|
||||
return updatedItems, nil
|
||||
}
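Both updateKubernetesSecrets and updateInjectedSecrets apply the same vault/item filter; a hypothetical helper (not part of this change) that captures that check:

```go
package onepassword

import "fmt"

// Hypothetical sketch: when a specific vault/item pair is supplied (e.g. from
// an item.update message), only resources whose ItemPath points at that item
// are refreshed; empty IDs mean "refresh everything".
func matchesUpdatedItem(itemPath, vaultId, itemId string) bool {
	if vaultId == "" || itemId == "" {
		return true
	}
	return itemPath == fmt.Sprintf("vaults/%s/items/%s", vaultId, itemId)
}
```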
|
||||
|
||||
func isItemLockedForForcedRestarts(item *onepassword.Item) bool {
|
||||
tags := item.Tags
|
||||
for i := 0; i < len(tags); i++ {
|
||||
@@ -178,7 +247,7 @@ func (h *SecretUpdateHandler) getIsSetForAutoRestartByNamespaceMap() (map[string
|
||||
|
||||
func isSecretSetForAutoRestart(secret *corev1.Secret, deployment *appsv1.Deployment, setForAutoRestartByNamespace map[string]bool) bool {
|
||||
restartDeployment := secret.Annotations[RestartDeploymentsAnnotation]
|
||||
//If annotation for auto restarts for deployment is not set. Check for the annotation on its namepsace
|
||||
//If annotation for auto restarts for deployment is not set. Check for the annotation on its deployment
|
||||
if restartDeployment == "" {
|
||||
return isDeploymentSetForAutoRestart(deployment, setForAutoRestartByNamespace)
|
||||
}
|
@@ -5,9 +5,10 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/1Password/onepassword-operator/pkg/mocks"
|
||||
"github.com/1Password/onepassword-operator/operator/pkg/mocks"
|
||||
|
||||
"github.com/1Password/connect-sdk-go/onepassword"
|
||||
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
@@ -20,23 +21,25 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
deploymentKind = "Deployment"
|
||||
deploymentAPIVersion = "v1"
|
||||
name = "test-deployment"
|
||||
namespace = "default"
|
||||
vaultId = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||
itemId = "nwrhuano7bcwddcviubpp4mhfq"
|
||||
username = "test-user"
|
||||
password = "QmHumKc$mUeEem7caHtbaBaJ"
|
||||
userKey = "username"
|
||||
passKey = "password"
|
||||
itemVersion = 123
|
||||
deploymentKind = "Deployment"
|
||||
deploymentAPIVersion = "v1"
|
||||
name = "test-deployment"
|
||||
namespace = "default"
|
||||
vaultId = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||
itemId = "nwrhuano7bcwddcviubpp4mhfq"
|
||||
username = "test-user"
|
||||
password = "QmHumKc$mUeEem7caHtbaBaJ"
|
||||
userKey = "username"
|
||||
passKey = "password"
|
||||
itemVersion = 123
|
||||
injectedOnePasswordItemName = "injectedsecret-" + vaultId + "-" + itemId
|
||||
)
|
||||
|
||||
type testUpdateSecretTask struct {
|
||||
testName string
|
||||
existingDeployment *appsv1.Deployment
|
||||
existingNamespace *corev1.Namespace
|
||||
existingOnePasswordItem *onepasswordv1.OnePasswordItem
|
||||
existingSecret *corev1.Secret
|
||||
expectedError error
|
||||
expectedResultSecret *corev1.Secret
|
||||
@@ -755,6 +758,123 @@ var tests = []testUpdateSecretTask{
|
||||
expectedRestart: true,
|
||||
globalAutoRestartEnabled: false,
|
||||
},
|
||||
{
|
||||
testName: "OP item has new version. Secret needs update. Deployment is restarted based on injected secrets in containers",
|
||||
existingNamespace: defaultNamespace,
|
||||
existingDeployment: &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: deploymentKind,
|
||||
APIVersion: deploymentAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
ContainerInjectAnnotation: "test-app",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "test-app",
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: name,
|
||||
Value: fmt.Sprintf("op://%s/%s/test", vaultId, itemId),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingOnePasswordItem: &onepasswordv1.OnePasswordItem{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "OnePasswordItem",
|
||||
APIVersion: "onepassword.com/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: injectedOnePasswordItemName,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
InjectedAnnotation: "true",
|
||||
VersionAnnotation: "old",
|
||||
},
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: itemPath,
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
},
|
||||
expectedRestart: true,
|
||||
globalAutoRestartEnabled: true,
|
||||
},
|
||||
{
|
||||
testName: "OP item has new version. Secret needs update. Deployment does not have a inject annotation",
|
||||
existingNamespace: defaultNamespace,
|
||||
existingDeployment: &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: deploymentKind,
|
||||
APIVersion: deploymentAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "test-app",
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: name,
|
||||
Value: fmt.Sprintf("op://%s/%s/test", vaultId, itemId),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingOnePasswordItem: &onepasswordv1.OnePasswordItem{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "OnePasswordItem",
|
||||
APIVersion: "onepassword.com/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", vaultId, itemId),
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
InjectedAnnotation: "true",
|
||||
VersionAnnotation: "old",
|
||||
},
|
||||
},
|
||||
Spec: onepasswordv1.OnePasswordItemSpec{
|
||||
ItemPath: itemPath,
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
opItem: map[string]string{
|
||||
userKey: username,
|
||||
passKey: password,
|
||||
},
|
||||
expectedRestart: false,
|
||||
globalAutoRestartEnabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
func TestUpdateSecretHandler(t *testing.T) {
|
||||
@@ -763,7 +883,7 @@ func TestUpdateSecretHandler(t *testing.T) {
|
||||
|
||||
// Register operator types with the runtime scheme.
|
||||
s := scheme.Scheme
|
||||
s.AddKnownTypes(appsv1.SchemeGroupVersion, testData.existingDeployment)
|
||||
s.AddKnownTypes(appsv1.SchemeGroupVersion, &onepasswordv1.OnePasswordItem{}, &onepasswordv1.OnePasswordItemList{}, &appsv1.Deployment{})
|
||||
|
||||
// Objects to track in the fake client.
|
||||
objs := []runtime.Object{
|
||||
@@ -775,6 +895,10 @@ func TestUpdateSecretHandler(t *testing.T) {
|
||||
objs = append(objs, testData.existingSecret)
|
||||
}
|
||||
|
||||
if testData.existingOnePasswordItem != nil {
|
||||
objs = append(objs, testData.existingOnePasswordItem)
|
||||
}
|
||||
|
||||
// Create a fake client to mock API calls.
|
||||
cl := fake.NewFakeClientWithScheme(s, objs...)
|
||||
|
||||
@@ -794,7 +918,7 @@ func TestUpdateSecretHandler(t *testing.T) {
|
||||
shouldAutoRestartDeploymentsGlobal: testData.globalAutoRestartEnabled,
|
||||
}
|
||||
|
||||
err := h.UpdateKubernetesSecretsTask()
|
||||
err := h.UpdateKubernetesSecretsTask("", "")
|
||||
|
||||
assert.Equal(t, testData.expectedError, err)
|
||||
|
||||
@@ -825,9 +949,9 @@ func TestUpdateSecretHandler(t *testing.T) {
|
||||
|
||||
_, ok := deployment.Spec.Template.Annotations[RestartAnnotation]
|
||||
if ok {
|
||||
assert.True(t, testData.expectedRestart, "Expected deployment to restart but it did not")
|
||||
assert.True(t, testData.expectedRestart, "Deployment was restarted but should not have been.")
|
||||
} else {
|
||||
assert.False(t, testData.expectedRestart, "Deployment was restarted but should not have been.")
|
||||
assert.False(t, testData.expectedRestart, "Expected deployment to restart but it did not")
|
||||
}
|
||||
})
|
||||
}
|
66
operator/pkg/utils/string.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
kubeValidate "k8s.io/apimachinery/pkg/util/validation"
|
||||
)
|
||||
|
||||
var invalidDNS1123Chars = regexp.MustCompile("[^a-z0-9-.]+")
|
||||
|
||||
func ContainsString(slice []string, s string) bool {
|
||||
for _, item := range slice {
|
||||
if item == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func RemoveString(slice []string, s string) (result []string) {
|
||||
for _, item := range slice {
|
||||
if item == s {
|
||||
continue
|
||||
}
|
||||
result = append(result, item)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func StringToBool(str string) (bool, error) {
|
||||
restartDeploymentBool, err := strconv.ParseBool(strings.ToLower(str))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return restartDeploymentBool, nil
|
||||
}
|
||||
|
||||
// FormatSecretName rewrites a value to be a valid Secret name.
|
||||
//
|
||||
// The Secret meta.name and data keys must be valid DNS subdomain names
|
||||
// (https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets)
|
||||
func FormatSecretName(value string) string {
|
||||
if errs := kubeValidate.IsDNS1123Subdomain(value); len(errs) == 0 {
|
||||
return value
|
||||
}
|
||||
return CreateValidSecretName(value)
|
||||
}
|
||||
|
||||
func CreateValidSecretName(value string) string {
|
||||
result := strings.ToLower(value)
|
||||
result = invalidDNS1123Chars.ReplaceAllString(result, "-")
|
||||
|
||||
if len(result) > kubeValidate.DNS1123SubdomainMaxLength {
|
||||
result = result[0:kubeValidate.DNS1123SubdomainMaxLength]
|
||||
}
|
||||
|
||||
// first and last character MUST be alphanumeric
|
||||
return strings.Trim(result, "-.")
|
||||
}
|
||||
|
||||
func BuildInjectedOnePasswordItemName(vaultId, injectedId string) string {
|
||||
return FormatSecretName(fmt.Sprintf("injectedsecret-%s-%s", vaultId, injectedId))
|
||||
}
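A hypothetical example (not part of this change) of the name sanitisation above:

```go
package utils

import "fmt"

// Hypothetical sketch of FormatSecretName behaviour.
func ExampleFormatSecretName() {
	// Already a valid DNS-1123 subdomain: returned unchanged.
	fmt.Println(FormatSecretName("injectedsecret-vaultid-itemid"))
	// Invalid characters are lowercased and replaced with '-', then the result
	// is trimmed so it starts and ends with an alphanumeric character.
	fmt.Println(FormatSecretName("My Secret_Name!"))
	// Output:
	// injectedsecret-vaultid-itemid
	// my-secret-name
}
```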
|
@@ -1,33 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ContainsString(slice []string, s string) bool {
|
||||
for _, item := range slice {
|
||||
if item == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func RemoveString(slice []string, s string) (result []string) {
|
||||
for _, item := range slice {
|
||||
if item == s {
|
||||
continue
|
||||
}
|
||||
result = append(result, item)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func StringToBool(str string) (bool, error) {
|
||||
restartDeploymentBool, err := strconv.ParseBool(strings.ToLower(str))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return restartDeploymentBool, nil
|
||||
}
|
30
secret-injector/Dockerfile
Normal file
@@ -0,0 +1,30 @@
|
||||
# Build the manager binary
|
||||
FROM golang:1.13 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
COPY go.mod go.mod
|
||||
COPY go.sum go.sum
|
||||
|
||||
# Copy the go source
|
||||
COPY secret-injector/cmd/main.go secret-injector/main.go
|
||||
COPY secret-injector/pkg/ secret-injector/pkg/
|
||||
COPY vendor/ vendor/
|
||||
# Build
|
||||
ARG secret_injector_version=dev
|
||||
RUN CGO_ENABLED=0 \
|
||||
GO111MODULE=on \
|
||||
go build \
|
||||
-ldflags "-X \"github.com/1Password/onepassword-operator/operator/version.Version=$secret_injector_version\"" \
|
||||
-mod vendor \
|
||||
-a -o injector secret-injector/main.go
|
||||
|
||||
# Use distroless as minimal base image to package the secret-injector binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
FROM gcr.io/distroless/static:nonroot
|
||||
WORKDIR /
|
||||
COPY --from=builder /workspace/injector .
|
||||
USER nonroot:nonroot
|
||||
|
||||
ENTRYPOINT ["/injector"]
|
||||
|
98
secret-injector/Makefile
Normal file
@@ -0,0 +1,98 @@
|
||||
# Image URL to use for all building/pushing image targets;
|
||||
# Use your own docker registry and image name for dev/test by overriding the
|
||||
# IMAGE_REPO, IMAGE_NAME and IMAGE_TAG environment variable.
|
||||
IMAGE_REPO ?= docker.io/morvencao
|
||||
IMAGE_NAME ?= op-secret-injector
|
||||
|
||||
# Github host to use for checking the source tree;
|
||||
# Override this variable with your own value if you're working on a forked repo.
|
||||
GIT_HOST ?= github.com/morvencao
|
||||
|
||||
PWD := $(shell pwd)
|
||||
BASE_DIR := $(shell basename $(PWD))
|
||||
|
||||
# Keep an existing GOPATH, make a private one if it is undefined
|
||||
GOPATH_DEFAULT := $(PWD)/.go
|
||||
export GOPATH ?= $(GOPATH_DEFAULT)
|
||||
TESTARGS_DEFAULT := "-v"
|
||||
export TESTARGS ?= $(TESTARGS_DEFAULT)
|
||||
DEST := $(GOPATH)/src/$(GIT_HOST)/$(BASE_DIR)
|
||||
IMAGE_TAG ?= $(shell date +v%Y%m%d)-$(shell git describe --match=$(git rev-parse --short=8 HEAD) --tags --always --dirty)
|
||||
|
||||
|
||||
LOCAL_OS := $(shell uname)
|
||||
ifeq ($(LOCAL_OS),Linux)
|
||||
TARGET_OS ?= linux
|
||||
XARGS_FLAGS="-r"
|
||||
else ifeq ($(LOCAL_OS),Darwin)
|
||||
TARGET_OS ?= darwin
|
||||
XARGS_FLAGS=
|
||||
else
|
||||
$(error "This system's OS $(LOCAL_OS) isn't recognized/supported")
|
||||
endif
|
||||
|
||||
all: fmt lint test build image
|
||||
|
||||
ifeq (,$(wildcard go.mod))
|
||||
ifneq ("$(realpath $(DEST))", "$(realpath $(PWD))")
|
||||
$(error Please run 'make' from $(DEST). Current directory is $(PWD))
|
||||
endif
|
||||
endif
|
||||
|
||||
############################################################
|
||||
# format section
|
||||
############################################################
|
||||
|
||||
fmt:
|
||||
@echo "Run go fmt..."
|
||||
|
||||
############################################################
|
||||
# lint section
|
||||
############################################################
|
||||
|
||||
lint:
|
||||
@echo "Runing the golangci-lint..."
|
||||
|
||||
############################################################
|
||||
# test section
|
||||
############################################################
|
||||
|
||||
test:
|
||||
@echo "Running the tests for $(IMAGE_NAME)..."
|
||||
@go test $(TESTARGS) ./...
|
||||
|
||||
############################################################
|
||||
# build section
|
||||
############################################################
|
||||
|
||||
build:
|
||||
@echo "Building the $(IMAGE_NAME) binary..."
|
||||
@CGO_ENABLED=0 go build -o build/_output/bin/$(IMAGE_NAME) ./cmd/
|
||||
|
||||
build-linux:
|
||||
@echo "Building the $(IMAGE_NAME) binary for Docker (linux)..."
|
||||
@GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o build/_output/linux/bin/$(IMAGE_NAME) ./cmd/
|
||||
|
||||
############################################################
|
||||
# image section
|
||||
############################################################
|
||||
|
||||
image: build-image push-image
|
||||
|
||||
build-image: build-linux
|
||||
@echo "Building the docker image: $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG)..."
|
||||
@docker build -t $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) -f build/Dockerfile .
|
||||
|
||||
push-image: build-image
|
||||
@echo "Pushing the docker image for $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) and $(IMAGE_REPO)/$(IMAGE_NAME):latest..."
|
||||
@docker tag $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) $(IMAGE_REPO)/$(IMAGE_NAME):latest
|
||||
@docker push $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG)
|
||||
@docker push $(IMAGE_REPO)/$(IMAGE_NAME):latest
|
||||
|
||||
############################################################
|
||||
# clean section
|
||||
############################################################
|
||||
clean:
|
||||
@rm -rf build/_output
|
||||
|
||||
.PHONY: all fmt lint check test build image clean
|
84
secret-injector/README.md
Normal file
@@ -0,0 +1,84 @@
|
||||
## Deploy
|
||||
|
||||
1. Create namespace `op-secret-injector` in which the 1Password secret injector webhook is deployed:
|
||||
|
||||
```
|
||||
# kubectl create ns op-secret-injector
|
||||
```
|
||||
|
||||
2. Create a signed cert/key pair and store it in a Kubernetes `secret` that will be consumed by the 1Password secret injector deployment:
|
||||
|
||||
```
|
||||
# ./deploy/webhook-create-signed-cert.sh \
|
||||
--service op-secret-injector-webhook-svc \
|
||||
--secret op-secret-injector-webhook-certs \
|
||||
--namespace op-secret-injector
|
||||
```
|
||||
|
||||
3. Patch the `MutatingWebhookConfiguration` by setting `caBundle` to the correct value from the Kubernetes cluster:
|
||||
|
||||
```
|
||||
# cat deploy/mutatingwebhook.yaml | \
|
||||
deploy/webhook-patch-ca-bundle.sh > \
|
||||
deploy/mutatingwebhook-ca-bundle.yaml
|
||||
```
|
||||
|
||||
4. Deploy resources:
|
||||
|
||||
```
|
||||
# kubectl create -f deploy/deployment.yaml
|
||||
# kubectl create -f deploy/service.yaml
|
||||
# kubectl create -f deploy/mutatingwebhook-ca-bundle.yaml
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
1. The sidecar injector webhook should be in the Running state:
|
||||
|
||||
```
|
||||
# kubectl -n sidecar-injector get pod
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
sidecar-injector-webhook-deployment-7c8bc5f4c9-28c84 1/1 Running 0 30s
|
||||
# kubectl -n sidecar-injector get deploy
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
sidecar-injector-webhook-deployment 1/1 1 1 67s
|
||||
```
|
||||
|
||||
2. Create a new namespace `injection` and label it with `sidecar-injection=enabled`:
|
||||
|
||||
```
|
||||
# kubectl create ns injection
|
||||
# kubectl label namespace injection sidecar-injection=enabled
|
||||
# kubectl get namespace -L sidecar-injection
|
||||
NAME STATUS AGE SIDECAR-INJECTION
|
||||
default Active 26m
|
||||
injection Active 13s enabled
|
||||
kube-public Active 26m
|
||||
kube-system Active 26m
|
||||
sidecar-injector Active 17m
|
||||
```
|
||||
|
||||
3. Deploy an app in the Kubernetes cluster, taking the `alpine` app as an example:
|
||||
|
||||
```
|
||||
# kubectl run alpine --image=alpine --restart=Never -n injection --overrides='{"apiVersion":"v1","metadata":{"annotations":{"sidecar-injector-webhook.morven.me/inject":"yes"}}}' --command -- sleep infinity
|
||||
```
|
||||
|
||||
4. Verify that the sidecar container is injected:
|
||||
|
||||
```
|
||||
# kubectl get pod
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
alpine 2/2 Running 0 1m
|
||||
# kubectl -n injection get pod alpine -o jsonpath="{.spec.containers[*].name}"
|
||||
alpine sidecar-nginx
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you find that a pod is not injected with the sidecar container as expected, check the following items:
|
||||
|
||||
1. The sidecar-injector webhook is in the Running state and its logs show no errors.
|
||||
2. The namespace in which the application pod is deployed has the correct labels as configured in the `mutatingwebhookconfiguration`.
|
||||
3. Check that the `caBundle` has been patched into the `mutatingwebhookconfiguration` object by verifying that the `caBundle` field is not empty.
|
||||
4. Check that the application pod has the annotation `sidecar-injector-webhook.morven.me/inject: "yes"`.
|
52
secret-injector/app_example.yaml
Normal file
@@ -0,0 +1,52 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: app-example
|
||||
spec:
|
||||
type: NodePort
|
||||
selector:
|
||||
app: app-example
|
||||
ports:
|
||||
- port: 5000
|
||||
name: app-example
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: app-example
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: app-example
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
operator.1password.io/inject: "app-example"
|
||||
labels:
|
||||
app: app-example
|
||||
spec:
|
||||
containers:
|
||||
- name: app-example
|
||||
command: ["./example"]
|
||||
image: connect-app-example:latest
|
||||
imagePullPolicy: Never
|
||||
resources:
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "0.2"
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
env:
|
||||
- name: OP_VAULT
|
||||
value: ApplicationConfiguration
|
||||
- name: APP_TITLE
|
||||
value: op://ApplicationConfiguration/Webapp/title
|
||||
- name: BUTTON_TEXT
|
||||
value: op://ApplicationConfiguration/Webapp/action
|
||||
- name: OP_CONNECT_HOST
|
||||
value: http://onepassword-connect:8080/
|
||||
- name: OP_CONNECT_TOKEN
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: onepassword-token
|
||||
key: token
|
85
secret-injector/cmd/main.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/1Password/onepassword-operator/secret-injector/pkg/webhook"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
connectTokenSecretKeyEnv = "OP_CONNECT_TOKEN_KEY"
|
||||
connectTokenSecretNameEnv = "OP_CONNECT_TOKEN_NAME"
|
||||
connectHostEnv = "OP_CONNECT_HOST"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var parameters webhook.WebhookServerParameters
|
||||
|
||||
glog.Info("Starting webhook")
|
||||
// get command line parameters
|
||||
flag.IntVar(¶meters.Port, "port", 8443, "Webhook server port.")
|
||||
flag.StringVar(¶meters.CertFile, "tlsCertFile", "/etc/webhook/certs/cert.pem", "File containing the x509 Certificate for HTTPS.")
|
||||
flag.StringVar(¶meters.KeyFile, "tlsKeyFile", "/etc/webhook/certs/key.pem", "File containing the x509 private key to --tlsCertFile.")
|
||||
flag.Parse()
|
||||
|
||||
pair, err := tls.LoadX509KeyPair(parameters.CertFile, parameters.KeyFile)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to load key pair: %v", err)
|
||||
}
|
||||
|
||||
connectHost, present := os.LookupEnv(connectHostEnv)
|
||||
if !present {
|
||||
glog.Error("")
|
||||
}
|
||||
|
||||
connectTokenName, present := os.LookupEnv(connectTokenSecretNameEnv)
|
||||
if !present {
|
||||
glog.Error("")
|
||||
}
|
||||
|
||||
connectTokenKey, present := os.LookupEnv(connectTokenSecretKeyEnv)
|
||||
if !present {
|
||||
glog.Error("")
|
||||
}
|
||||
|
||||
webhookConfig := webhook.Config{
|
||||
ConnectHost: connectHost,
|
||||
ConnectTokenName: connectTokenName,
|
||||
ConnectTokenKey: connectTokenKey,
|
||||
}
|
||||
webhookServer := &webhook.WebhookServer{
|
||||
Config: webhookConfig,
|
||||
Server: &http.Server{
|
||||
Addr: fmt.Sprintf(":%v", parameters.Port),
|
||||
TLSConfig: &tls.Config{Certificates: []tls.Certificate{pair}},
|
||||
},
|
||||
}
|
||||
|
||||
// define http server and server handler
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/inject", webhookServer.Serve)
|
||||
webhookServer.Server.Handler = mux
|
||||
|
||||
// start webhook server in a new goroutine
|
||||
go func() {
|
||||
if err := webhookServer.Server.ListenAndServeTLS("", ""); err != nil {
|
||||
glog.Errorf("Failed to listen and serve webhook server: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// listen for the OS shutdown signal
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-signalChan
|
||||
|
||||
glog.Infof("Got OS shutdown signal, shutting down webhook server gracefully...")
|
||||
webhookServer.Server.Shutdown(context.Background())
|
||||
}
|
42
secret-injector/deploy/deployment.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: op-secret-injector-webhook-deployment
|
||||
namespace: op-secret-injector
|
||||
labels:
|
||||
app: op-secret-injector
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: op-secret-injector
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: op-secret-injector
|
||||
spec:
|
||||
containers:
|
||||
- name: op-secret-injector
|
||||
image: local/onepassword-secrets-injector:v1.1.0
|
||||
imagePullPolicy: Never
|
||||
args:
|
||||
- -tlsCertFile=/etc/webhook/certs/cert.pem
|
||||
- -tlsKeyFile=/etc/webhook/certs/key.pem
|
||||
- -alsologtostderr
|
||||
- -v=4
|
||||
- 2>&1
|
||||
env:
|
||||
- name: OP_CONNECT_HOST
|
||||
value: http://onepassword-connect:8080/
|
||||
- name: OP_CONNECT_TOKEN_NAME
|
||||
value: onepassword-token
|
||||
- name: OP_CONNECT_TOKEN_KEY
|
||||
value: token
|
||||
volumeMounts:
|
||||
- name: webhook-certs
|
||||
mountPath: /etc/webhook/certs
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: webhook-certs
|
||||
secret:
|
||||
secretName: op-secret-injector-webhook-certs
|
22
secret-injector/deploy/mutatingwebhook-ca-bundle.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: op-secret-injector-webhook-cfg
|
||||
labels:
|
||||
app: op-secret-injector
|
||||
webhooks:
|
||||
- name: op-secret-injector.morven.me
|
||||
clientConfig:
|
||||
service:
|
||||
name: op-secret-injector-webhook-svc
|
||||
namespace: op-secret-injector
|
||||
path: "/inject"
|
||||
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJd01Ea3lOekl3TkRjMU9Wb1hEVE13TURreU5qSXdORGMxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTmlICjZzZVJsZG9CWlRpRVJMeVhwbXFCU3ZOcmJyMWFMcVhpZVVWcXdCcytOUUora1hsazBIRWFldnJRU2QvNnVqY2UKSHpuNFR6Smh3Qk9pYU5BSDN6QUZkeXZxRGwwZVFzNm50R2pDbVFFK0xrUU5PQlVXYmk3WEc2am1tdDA5aFFUVwpTOXg2UDdpai9lUUtLRUJFQTFlRWYvTFZibDZPMVBqa0lXV2E0SjFRMEZoQUtnSjdxUmVJaEg1dkRoVHF3TXVzClZLTEF2bU9xRk03aDNmZ1UzWVltZldpMUFoVnF0VklMYmhkOS8xbzFYM2ZESitFK0dESGMyb0NKK1QvQkxJTmsKOWhTWEhWOTdONFhib1BUWktzZXJFa3JQTnlFYkY4alpvWndBc3FuRVhBNW5sem5vTlJnTFNqSEE0NFZXOGZyawo1RWtJdFNPdkFMMHM1K0FDMnFzQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTSG5DcFRMSDRQeDRkai9oWTVNWEx6TndNeWhqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFvamR6ZzlySgpZTjlJSTJjaUNRS0djZEZGbWtHcGxiandRczBVMVdKY0plZWs3WDh3WWdPMnI3UFhLRklDTEM3aGM5bkUxYnluCkxha2YwMzhUNzRBQzlQOHZXUUJSb2lFMlBKV1BGMjhGTFJWeWgwTWdYQ3dZU20zeitIRDR5TjViWFpNSmJ4WlYKamRza0IzeVpHSW9jZ2RBSk1rU0ptQTN6RkowaHpsY09EZTNOTVA0Ujl4Z0VWczU4bHV5bjl6bm5sL2FDODlHdwpuVnVPRkk0S0dwOFF5NXFjQUxKZndiRGNrNzBjbnRQUEhBN2trT1JtUG41Z2hNSFJPZGxsamxmdXYxVE5RcVo2CjhjUENRRW1zc1ZyajFrVEh6Y3FOUXpqOWVMK2VPMGtyRWw2dzZMcm5YY0dleUxIZVc1cHF6YUY2bWZrTitEZEQKSENjV0U2V1pvTUp2UFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
rules:
|
||||
- operations: ["CREATE", "UPDATE"]
|
||||
apiGroups: [""]
|
||||
apiVersions: ["v1"]
|
||||
resources: ["pods"]
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
op-secret-injection: enabled
|
22
secret-injector/deploy/mutatingwebhook.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: op-secret-injector-webhook-cfg
|
||||
labels:
|
||||
app: op-secret-injector
|
||||
webhooks:
|
||||
- name: op-secret-injector.morven.me
|
||||
clientConfig:
|
||||
service:
|
||||
name: op-secret-injector-webhook-svc
|
||||
namespace: op-secret-injector
|
||||
path: "/inject"
|
||||
caBundle: ${CA_BUNDLE}
|
||||
rules:
|
||||
- operations: ["CREATE", "UPDATE"]
|
||||
apiGroups: [""]
|
||||
apiVersions: ["v1"]
|
||||
resources: ["pods"]
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
op-secret-injection: enabled
|
13
secret-injector/deploy/service.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: op-secret-injector-webhook-svc
|
||||
namespace: op-secret-injector
|
||||
labels:
|
||||
app: op-secret-injector
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
app: op-secret-injector
|
131
secret-injector/deploy/webhook-create-signed-cert.sh
Executable file
@@ -0,0 +1,131 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Generate certificate suitable for use with an op-secret-injector webhook service.
|
||||
|
||||
This script uses k8s' CertificateSigningRequest API to generate a
|
||||
certificate signed by k8s CA suitable for use with op-secret-injector webhook
|
||||
services. This requires permissions to create and approve CSR. See
|
||||
https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster for
|
||||
detailed explanation and additional instructions.
|
||||
|
||||
The server key/cert and the k8s CA cert are stored in a k8s secret.
|
||||
|
||||
usage: ${0} [OPTIONS]
|
||||
|
||||
The following flags are required.
|
||||
|
||||
--service Service name of webhook.
|
||||
--namespace Namespace where webhook service and secret reside.
|
||||
--secret Secret name for CA certificate and server certificate/key pair.
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case ${1} in
|
||||
--service)
|
||||
service="$2"
|
||||
shift
|
||||
;;
|
||||
--secret)
|
||||
secret="$2"
|
||||
shift
|
||||
;;
|
||||
--namespace)
|
||||
namespace="$2"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
[ -z "${service}" ] && service=op-secret-injector-webhook-svc
|
||||
[ -z "${secret}" ] && secret=op-secret-injector-webhook-certs
|
||||
[ -z "${namespace}" ] && namespace=default
|
||||
|
||||
if [ ! -x "$(command -v openssl)" ]; then
|
||||
echo "openssl not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
csrName=${service}.${namespace}
|
||||
tmpdir=$(mktemp -d)
|
||||
echo "creating certs in tmpdir ${tmpdir} "
|
||||
|
||||
cat <<EOF >> "${tmpdir}"/csr.conf
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
extendedKeyUsage = serverAuth
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = ${service}
|
||||
DNS.2 = ${service}.${namespace}
|
||||
DNS.3 = ${service}.${namespace}.svc
|
||||
EOF
|
||||
|
||||
openssl genrsa -out "${tmpdir}"/server-key.pem 2048
|
||||
openssl req -new -key "${tmpdir}"/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out "${tmpdir}"/server.csr -config "${tmpdir}"/csr.conf
|
||||
|
||||
# clean-up any previously created CSR for our service. Ignore errors if not present.
|
||||
kubectl delete csr ${csrName} 2>/dev/null || true
|
||||
|
||||
# create server cert/key CSR and send to k8s API
|
||||
cat <<EOF | kubectl create -f -
|
||||
apiVersion: certificates.k8s.io/v1beta1
|
||||
kind: CertificateSigningRequest
|
||||
metadata:
|
||||
name: ${csrName}
|
||||
spec:
|
||||
groups:
|
||||
- system:authenticated
|
||||
request: $(< "${tmpdir}"/server.csr base64 | tr -d '\n')
|
||||
usages:
|
||||
- digital signature
|
||||
- key encipherment
|
||||
- server auth
|
||||
EOF
|
||||
|
||||
# verify CSR has been created
|
||||
while true; do
|
||||
if kubectl get csr ${csrName}; then
|
||||
break
|
||||
else
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
|
||||
# approve and fetch the signed certificate
|
||||
kubectl certificate approve ${csrName}
|
||||
# verify certificate has been signed
|
||||
for _ in $(seq 10); do
|
||||
serverCert=$(kubectl get csr ${csrName} -o jsonpath='{.status.certificate}')
|
||||
if [[ ${serverCert} != '' ]]; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
if [[ ${serverCert} == '' ]]; then
|
||||
echo "ERROR: After approving csr ${csrName}, the signed certificate did not appear on the resource. Giving up after 10 attempts." >&2
|
||||
exit 1
|
||||
fi
|
||||
echo "${serverCert}" | openssl base64 -d -A -out "${tmpdir}"/server-cert.pem
|
||||
|
||||
|
||||
# create the secret with CA cert and server cert/key
|
||||
kubectl create secret generic ${secret} \
|
||||
--from-file=key.pem="${tmpdir}"/server-key.pem \
|
||||
--from-file=cert.pem="${tmpdir}"/server-cert.pem \
|
||||
--dry-run -o yaml |
|
||||
kubectl -n ${namespace} apply -f -
|
19
secret-injector/deploy/webhook-patch-ca-bundle.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
CA_BUNDLE=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}')
|
||||
|
||||
if [ -z "${CA_BUNDLE}" ]; then
|
||||
CA_BUNDLE=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.ca\.crt}")
|
||||
fi
|
||||
|
||||
export CA_BUNDLE
|
||||
|
||||
if command -v envsubst >/dev/null 2>&1; then
|
||||
envsubst
|
||||
else
|
||||
sed -e "s|\${CA_BUNDLE}|${CA_BUNDLE}|g"
|
||||
fi
|
214
secret-injector/golangci.yml
Normal file
@@ -0,0 +1,214 @@
|
||||
service:
|
||||
# When updating this, also update the version stored in docker/build-tools/Dockerfile in the multicloudlab/tools repo.
|
||||
golangci-lint-version: 1.18.x # use the fixed version to not introduce new linters unexpectedly
|
||||
run:
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
deadline: 20m
|
||||
|
||||
# which dirs to skip: they won't be analyzed;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but next dirs are always skipped independently
|
||||
# from this option's value:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs:
|
||||
- genfiles$
|
||||
- vendor$
|
||||
|
||||
# which files to skip: they will be analyzed, but issues from them
|
||||
# won't be reported. Default value is empty list, but there is
|
||||
# no need to include all autogenerated files, we confidently recognize
|
||||
# autogenerated files. If it's not please let us know.
|
||||
skip-files:
|
||||
- ".*\\.pb\\.go"
|
||||
- ".*\\.gen\\.go"
|
||||
|
||||
linters:
|
||||
# please, do not use `enable-all`: it's deprecated and will be removed soon.
|
||||
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
|
||||
disable-all: true
|
||||
enable:
|
||||
- deadcode
|
||||
- errcheck
|
||||
- gocyclo
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- interfacer
|
||||
- lll
|
||||
- misspell
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- varcheck
|
||||
# don't enable:
|
||||
# - gocritic
|
||||
# - bodyclose
|
||||
# - depguard
|
||||
# - dogsled
|
||||
# - dupl
|
||||
# - funlen
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
# - gocognit
|
||||
# - godox
|
||||
# - maligned
|
||||
# - nakedret
|
||||
# - prealloc
|
||||
# - scopelint
|
||||
# - whitespace
|
||||
# - stylecheck
|
||||
|
||||
linters-settings:
|
||||
errcheck:
|
||||
# report about not checking errors in type assertions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
check-shadowing: false
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.0
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
# it's a comma-separated list of prefixes
|
||||
local-prefixes: github.com/IBM/
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
ignore-words:
|
||||
- cancelled
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 300
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
unparam:
|
||||
# call graph construction algorithm (cha, rta). In general, use cha for libraries,
|
||||
# and rta for programs with main packages. Default is cha.
|
||||
algo: cha
|
||||
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
gocritic:
|
||||
enabled-checks:
|
||||
- appendCombine
|
||||
- argOrder
|
||||
- assignOp
|
||||
- badCond
|
||||
- boolExprSimplify
|
||||
- builtinShadow
|
||||
- captLocal
|
||||
- caseOrder
|
||||
- codegenComment
|
||||
- commentedOutCode
|
||||
- commentedOutImport
|
||||
- defaultCaseOrder
|
||||
- deprecatedComment
|
||||
- docStub
|
||||
- dupArg
|
||||
- dupBranchBody
|
||||
- dupCase
|
||||
- dupSubExpr
|
||||
- elseif
|
||||
- emptyFallthrough
|
||||
- equalFold
|
||||
- flagDeref
|
||||
- flagName
|
||||
- hexLiteral
|
||||
- indexAlloc
|
||||
- initClause
|
||||
- methodExprCall
|
||||
- nilValReturn
|
||||
- octalLiteral
|
||||
- offBy1
|
||||
- rangeExprCopy
|
||||
- regexpMust
|
||||
- sloppyLen
|
||||
- stringXbytes
|
||||
- switchTrue
|
||||
- typeAssertChain
|
||||
- typeSwitchVar
|
||||
- typeUnparen
|
||||
- underef
|
||||
- unlambda
|
||||
- unnecessaryBlock
|
||||
- unslice
|
||||
- valSwap
|
||||
- weakCond
|
||||
|
||||
# Unused
|
||||
# - yodaStyleExpr
|
||||
# - appendAssign
|
||||
# - commentFormatting
|
||||
# - emptyStringTest
|
||||
# - exitAfterDefer
|
||||
# - ifElseChain
|
||||
# - hugeParam
|
||||
# - importShadow
|
||||
# - nestingReduce
|
||||
# - paramTypeCombine
|
||||
# - ptrToRefParam
|
||||
# - rangeValCopy
|
||||
# - singleCaseSwitch
|
||||
# - sloppyReassign
|
||||
# - unlabelStmt
|
||||
# - unnamedResult
|
||||
# - wrapperFunc
|
||||
|
||||
issues:
|
||||
# List of regexps of issue texts to exclude, empty list by default.
|
||||
# But independently from this option we use default exclude patterns,
|
||||
# it can be disabled by `exclude-use-default: false`. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`
|
||||
exclude:
|
||||
- composite literal uses unkeyed fields
|
||||
|
||||
exclude-rules:
|
||||
# Exclude some linters from running on test files.
|
||||
- path: _test\.go$|^tests/|^samples/
|
||||
linters:
|
||||
- errcheck
|
||||
- maligned
|
||||
|
||||
# Independently from option `exclude` we use default exclude patterns,
|
||||
# it can be disabled by this option. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`.
|
||||
# Default value for this option is true.
|
||||
exclude-use-default: true
|
||||
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
max-per-linter: 0
|
||||
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
max-same-issues: 0
|
||||
|
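As a point of reference, the following small Go snippet is a hypothetical illustration (not part of this diff) of what the errcheck settings above do and do not report while check-type-assertions and check-blank stay false:

// errcheck_examples.go: hypothetical sketch only; names and values are illustrative.
package main

import "strconv"

func examples(b interface{}) {
	// Reported by errcheck: the returned error is silently dropped.
	strconv.Atoi("42")

	// NOT reported with check-blank: false, because the error is
	// explicitly assigned to the blank identifier.
	n, _ := strconv.Atoi("42")
	_ = n

	// NOT reported with check-type-assertions: false, even though an
	// unchecked assertion panics if b is not a string.
	s := b.(string)
	_ = s
}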
secret-injector/pkg/webhook/webhook.go (new file, 469 lines)
@@ -0,0 +1,469 @@
package webhook

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/admission/v1beta1"
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	v1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

const (
	// binVolumeName is the name of the volume where the OP CLI binary is stored.
	binVolumeName = "op-bin"

	// binVolumeMountPath is the mount path where the OP CLI binary can be found.
	binVolumeMountPath = "/op/bin/"
)

// binVolume is the shared, in-memory volume where the OP CLI binary lives.
var binVolume = corev1.Volume{
	Name: binVolumeName,
	VolumeSource: corev1.VolumeSource{
		EmptyDir: &corev1.EmptyDirVolumeSource{
			Medium: corev1.StorageMediumMemory,
		},
	},
}

// binVolumeMount is the shared volume mount where the OP CLI binary lives.
var binVolumeMount = corev1.VolumeMount{
	Name:      binVolumeName,
	MountPath: binVolumeMountPath,
	ReadOnly:  true,
}

var (
	runtimeScheme = runtime.NewScheme()
	codecs        = serializer.NewCodecFactory(runtimeScheme)
	deserializer  = codecs.UniversalDeserializer()

	// (https://github.com/kubernetes/kubernetes/issues/57982)
	defaulter = runtime.ObjectDefaulter(runtimeScheme)
)

var ignoredNamespaces = []string{
	metav1.NamespaceSystem,
	metav1.NamespacePublic,
}

const (
	injectionStatus   = "operator.1password.io/status"
	injectAnnotation  = "operator.1password.io/inject"
	versionAnnotation = "operator.1password.io/version"
)

// WebhookServer wraps the HTTP server and the Connect configuration used by the injector.
type WebhookServer struct {
	Config Config
	Server *http.Server
}

// WebhookServerParameters holds the webhook server parameters.
type WebhookServerParameters struct {
	Port     int    // webhook server port
	CertFile string // path to the x509 certificate for https
	KeyFile  string // path to the x509 private key matching `CertFile`
}

// Config holds the 1Password Connect settings injected into mutated containers.
type Config struct {
	ConnectHost      string
	ConnectTokenName string
	ConnectTokenKey  string
}

// patchOperation is a single RFC 6902 JSON Patch operation.
type patchOperation struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value,omitempty"`
}

func init() {
	_ = corev1.AddToScheme(runtimeScheme)
	_ = admissionregistrationv1beta1.AddToScheme(runtimeScheme)
	_ = v1.AddToScheme(runtimeScheme)
}

// applyDefaultsWorkaround applies API defaults to the given containers and
// volumes (see the Kubernetes issue linked above).
func applyDefaultsWorkaround(containers []corev1.Container, volumes []corev1.Volume) {
	defaulter.Default(&corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: containers,
			Volumes:    volumes,
		},
	})
}

// mutationRequired checks whether the target resource needs to be mutated.
func mutationRequired(ignoredList []string, metadata *metav1.ObjectMeta) bool {
	// skip special Kubernetes system namespaces
	for _, namespace := range ignoredList {
		if metadata.Namespace == namespace {
			glog.Infof("Skip mutation for %v for it's in special namespace:%v", metadata.Name, metadata.Namespace)
			return false
		}
	}

	annotations := metadata.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}

	status := annotations[injectionStatus]
	_, enabled := annotations[injectAnnotation]

	// determine whether to perform mutation based on annotation for the target resource
	required := false
	if strings.ToLower(status) != "injected" && enabled {
		required = true
	}

	glog.Infof("Mutation policy for %v/%v: status: %q required:%v", metadata.Namespace, metadata.Name, status, required)
	return required
}

// addContainers creates patch operations that append the given containers at basePath.
func addContainers(target, added []corev1.Container, basePath string) (patch []patchOperation) {
	first := len(target) == 0
	var value interface{}
	for _, add := range added {
		value = add
		path := basePath
		if first {
			first = false
			value = []corev1.Container{add}
		} else {
			path = path + "/-"
		}
		patch = append(patch, patchOperation{
			Op:    "add",
			Path:  path,
			Value: value,
		})
	}
	return patch
}

// addVolume creates patch operations that append the given volumes at basePath.
func addVolume(target, added []corev1.Volume, basePath string) (patch []patchOperation) {
	first := len(target) == 0
	var value interface{}
	for _, add := range added {
		value = add
		path := basePath
		if first {
			first = false
			value = []corev1.Volume{add}
		} else {
			path = path + "/-"
		}
		patch = append(patch, patchOperation{
			Op:    "add",
			Path:  path,
			Value: value,
		})
	}
	return patch
}

// updateAnnotation creates patch operations that add or replace the given annotations.
func updateAnnotation(target map[string]string, added map[string]string) (patch []patchOperation) {
	for key, value := range added {
		if target == nil || target[key] == "" {
			target = map[string]string{}
			patch = append(patch, patchOperation{
				Op:   "add",
				Path: "/metadata/annotations",
				Value: map[string]string{
					key: value,
				},
			})
		} else {
			patch = append(patch, patchOperation{
				Op:    "replace",
				Path:  "/metadata/annotations/" + key,
				Value: value,
			})
		}
	}
	return patch
}

// mutate is the main mutation process.
func (whsvr *WebhookServer) mutate(ar *v1beta1.AdmissionReview) *v1beta1.AdmissionResponse {
	ctx := context.Background()
	req := ar.Request
	var pod corev1.Pod
	if err := json.Unmarshal(req.Object.Raw, &pod); err != nil {
		glog.Errorf("Could not unmarshal raw object: %v", err)
		return &v1beta1.AdmissionResponse{
			Result: &metav1.Status{
				Message: err.Error(),
			},
		}
	}

	glog.Infof("AdmissionReview for Kind=%v, Namespace=%v Name=%v (%v) UID=%v patchOperation=%v UserInfo=%v",
		req.Kind, req.Namespace, req.Name, pod.Name, req.UID, req.Operation, req.UserInfo)

	// determine whether to perform mutation
	if !mutationRequired(ignoredNamespaces, &pod.ObjectMeta) {
		glog.Infof("Skipping mutation for %s/%s due to policy check", pod.Namespace, pod.Name)
		return &v1beta1.AdmissionResponse{
			Allowed: true,
		}
	}

	containersStr := pod.Annotations[injectAnnotation]

	containers := map[string]struct{}{}

	for _, container := range strings.Split(containersStr, ",") {
		containers[container] = struct{}{}
	}

	version, ok := pod.Annotations[versionAnnotation]
	if !ok {
		version = "latest"
	}

	mutated := false

	var patch []patchOperation
	for i, c := range pod.Spec.InitContainers {
		_, mutate := containers[c.Name]
		if !mutate {
			continue
		}
		c, didMutate, initContainerPatch, err := whsvr.mutateContainer(ctx, &c, i)
		if err != nil {
			return &v1beta1.AdmissionResponse{
				Result: &metav1.Status{
					Message: err.Error(),
				},
			}
		}
		if didMutate {
			mutated = true
			pod.Spec.InitContainers[i] = *c
		}
		patch = append(patch, initContainerPatch...)
	}

	for i, c := range pod.Spec.Containers {
		_, mutate := containers[c.Name]
		if !mutate {
			continue
		}

		c, didMutate, containerPatch, err := whsvr.mutateContainer(ctx, &c, i)
		if err != nil {
			glog.Errorf("Error occurred mutating container: %v", err)
			return &v1beta1.AdmissionResponse{
				Result: &metav1.Status{
					Message: err.Error(),
				},
			}
		}
		patch = append(patch, containerPatch...)
		if didMutate {
			mutated = true
			pod.Spec.Containers[i] = *c
		}
	}

	// binInitContainer is the container that pulls the OP CLI
	// into a shared volume mount.
	var binInitContainer = corev1.Container{
		Name:            "copy-op-bin",
		Image:           "op-example" + ":" + version,
		ImagePullPolicy: corev1.PullIfNotPresent,
		Command: []string{"sh", "-c",
			fmt.Sprintf("cp /usr/local/bin/op %s", binVolumeMountPath)},
		VolumeMounts: []corev1.VolumeMount{
			{
				Name:      binVolumeName,
				MountPath: binVolumeMountPath,
			},
		},
	}

	if !mutated {
		glog.Infof("No mutations made for %s/%s", pod.Namespace, pod.Name)
		return &v1beta1.AdmissionResponse{
			Allowed: true,
		}
	}
	annotations := map[string]string{injectionStatus: "injected"}
	patchBytes, err := createOPCLIPatch(&pod, annotations, []corev1.Container{binInitContainer}, patch)
	if err != nil {
		return &v1beta1.AdmissionResponse{
			Result: &metav1.Status{
				Message: err.Error(),
			},
		}
	}

	glog.Infof("AdmissionResponse: patch=%v\n", string(patchBytes))
	return &v1beta1.AdmissionResponse{
		Allowed: true,
		Patch:   patchBytes,
		PatchType: func() *v1beta1.PatchType {
			pt := v1beta1.PatchTypeJSONPatch
			return &pt
		}(),
	}
}

// createOPCLIPatch creates the mutation patch for the resources.
func createOPCLIPatch(pod *corev1.Pod, annotations map[string]string, containers []corev1.Container, patch []patchOperation) ([]byte, error) {

	patch = append(patch, addVolume(pod.Spec.Volumes, []corev1.Volume{binVolume}, "/spec/volumes")...)
	patch = append(patch, addContainers(pod.Spec.InitContainers, containers, "/spec/initContainers")...)
	patch = append(patch, updateAnnotation(pod.Annotations, annotations)...)

	return json.Marshal(patch)
}

// createOPConnectPatch adds the OP_CONNECT_HOST and OP_CONNECT_TOKEN environment
// variables to the container at containerIndex.
func createOPConnectPatch(container *corev1.Container, containerIndex int, host, tokenSecretName, tokenSecretKey string) []patchOperation {
	var patch []patchOperation
	connectHostEnvVar := corev1.EnvVar{
		Name:  "OP_CONNECT_HOST",
		Value: host,
	}

	connectTokenEnvVar := corev1.EnvVar{
		Name: "OP_CONNECT_TOKEN",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: &corev1.SecretKeySelector{
				Key: tokenSecretKey,
				LocalObjectReference: corev1.LocalObjectReference{
					Name: tokenSecretName,
				},
			},
		},
	}

	envs := []corev1.EnvVar{
		connectHostEnvVar,
		connectTokenEnvVar,
	}

	patch = append(patch, setEnvironment(*container, containerIndex, envs, "/spec/containers")...)

	return patch
}

// mutateContainer prepends the container command with `op run --`, mounts the
// shared CLI volume, and injects the Connect environment variables.
func (whsvr *WebhookServer) mutateContainer(_ context.Context, container *corev1.Container, containerIndex int) (*corev1.Container, bool, []patchOperation, error) {
	// Because we are running a command in the pod before starting the container app,
	// we need to prepend the pod command with the op run command
	if len(container.Command) == 0 {
		return container, false, nil, fmt.Errorf("not attaching OP to the container %s: the podspec does not define a command", container.Name)
	}

	// Prepend the command with op run --
	container.Command = append([]string{binVolumeMountPath + "op", "run", "--"}, container.Command...)

	var patch []patchOperation

	// adding the cli to the container using a volume mount
	path := fmt.Sprintf("%s/%d/volumeMounts", "/spec/containers", containerIndex)
	patch = append(patch, patchOperation{
		Op:    "add",
		Path:  path,
		Value: []corev1.VolumeMount{binVolumeMount},
	})

	// replacing the container command with a command prepended with op run
	path = fmt.Sprintf("%s/%d/command", "/spec/containers", containerIndex)
	patch = append(patch, patchOperation{
		Op:    "replace",
		Path:  path,
		Value: container.Command,
	})

	// creating patch for adding connect environment variables to the container
	patch = append(patch, createOPConnectPatch(container, containerIndex, whsvr.Config.ConnectHost, whsvr.Config.ConnectTokenName, whsvr.Config.ConnectTokenKey)...)
	return container, true, patch, nil
}

// setEnvironment creates patch operations that append the given environment
// variables to the container at containerIndex.
func setEnvironment(container corev1.Container, containerIndex int, addedEnv []corev1.EnvVar, basePath string) (patch []patchOperation) {
	first := len(container.Env) == 0
	var value interface{}
	for _, add := range addedEnv {
		path := fmt.Sprintf("%s/%d/env", basePath, containerIndex)
		value = add
		if first {
			first = false
			value = []corev1.EnvVar{add}
		} else {
			path = path + "/-"
		}
		patch = append(patch, patchOperation{
			Op:    "add",
			Path:  path,
			Value: value,
		})
	}
	return patch
}

// Serve is the HTTP handler for the webhook server.
func (whsvr *WebhookServer) Serve(w http.ResponseWriter, r *http.Request) {
	var body []byte
	if r.Body != nil {
		if data, err := ioutil.ReadAll(r.Body); err == nil {
			body = data
		}
	}
	if len(body) == 0 {
		glog.Error("empty body")
		http.Error(w, "empty body", http.StatusBadRequest)
		return
	}

	// verify the content type is accurate
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		glog.Errorf("Content-Type=%s, expect application/json", contentType)
		http.Error(w, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
		return
	}

	var admissionResponse *v1beta1.AdmissionResponse
	ar := v1beta1.AdmissionReview{}
	if _, _, err := deserializer.Decode(body, nil, &ar); err != nil {
		glog.Errorf("Can't decode body: %v", err)
		admissionResponse = &v1beta1.AdmissionResponse{
			Result: &metav1.Status{
				Message: err.Error(),
			},
		}
	} else {
		admissionResponse = whsvr.mutate(&ar)
	}

	admissionReview := v1beta1.AdmissionReview{}
	if admissionResponse != nil {
		admissionReview.Response = admissionResponse
		if ar.Request != nil {
			admissionReview.Response.UID = ar.Request.UID
		}
	}

	resp, err := json.Marshal(admissionReview)
	if err != nil {
		glog.Errorf("Can't encode response: %v", err)
		http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
		return
	}
	glog.Infof("Ready to write response ...")
	if _, err := w.Write(resp); err != nil {
		glog.Errorf("Can't write response: %v", err)
		http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
	}
}
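For context, the mutation above is delivered to the API server as an RFC 6902 JSON Patch. The following is a minimal sketch, assuming it lives in the same webhook package (hypothetical file name, example values; the real webhook sources OP_CONNECT_TOKEN from a Secret rather than a literal value), of the patch setEnvironment produces for a container that has no environment variables yet:

// patch_sketch_test.go: illustrative sketch only.
package webhook

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func ExampleSetEnvironmentPatch() {
	container := corev1.Container{Name: "app"} // no Env set, so the first op creates the env array
	envs := []corev1.EnvVar{
		{Name: "OP_CONNECT_HOST", Value: "http://onepassword-connect:8080"},
		{Name: "OP_CONNECT_TOKEN", Value: "example-token"},
	}

	patch := setEnvironment(container, 0, envs, "/spec/containers")
	out, _ := json.MarshalIndent(patch, "", "  ")
	fmt.Println(string(out))
	// The first operation adds /spec/containers/0/env as a whole; the second
	// appends with the JSON Patch "/-" suffix, mirroring addContainers and
	// addVolume above.
}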
secret-injector/test/Dockerfile (new file, 9 lines)
@@ -0,0 +1,9 @@
FROM ubuntu:latest
ARG VERSION

RUN apt-get update && apt-get install -y curl unzip jq && \
    curl -o 1password.zip https://bucket.agilebits.com/cli-private-beta/v2/op_linux_amd64_v2-alpha2.zip && \
    unzip 1password.zip -d /usr/local/bin && \
    rm 1password.zip

CMD ["op"]
vendor/github.com/docker/distribution/LICENSE (generated, vendored, new file, 202 lines)
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
vendor/github.com/docker/distribution/digestset/set.go (generated, vendored, new file, 247 lines)
@@ -0,0 +1,247 @@
|
||||
package digestset
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by a string
|
||||
// representation of the digest as well as short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are omitted from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
mutex sync.RWMutex
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (digest.Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg digest.Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := digest.Parse(d)
|
||||
if err == digest.ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes the given digest from the set. An err will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
// Not found if idx is after or value at idx is not digest
|
||||
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := dst.entries
|
||||
copy(entries[idx:], entries[idx+1:])
|
||||
entries = entries[:len(entries)-1]
|
||||
dst.entries = entries
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []digest.Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]digest.Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
|
||||
return retValues
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length represents the minimum value, the maximum length may be the
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[digest.Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg digest.Algorithm
|
||||
val string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
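For context, a minimal usage sketch of the vendored digestset API above (hypothetical file, illustrative digest values):

// digestset_usage.go: illustrative sketch only.
package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()
	d := digest.FromString("example-blob") // sha256 digest of a sample payload
	if err := set.Add(d); err != nil {
		panic(err)
	}

	// Lookup accepts full digests or unambiguous short hex prefixes.
	if found, err := set.Lookup(d.Hex()[:12]); err == nil {
		fmt.Println("resolved:", found)
	}

	// ShortCodeTable picks the shortest unique prefix (at least 6 chars here) per digest.
	for dg, short := range digestset.ShortCodeTable(set, 6) {
		fmt.Println(short, "->", dg)
	}
}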
vendor/github.com/docker/distribution/reference/helpers.go (generated, vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
package reference

import "path"

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
	if _, ok := ref.(NamedTagged); ok {
		return false
	}
	if _, ok := ref.(Canonical); ok {
		return false
	}
	return true
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().Name()
	}
	return ref.Name()
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().String()
	}
	return ref.String()
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
		matched, _ = path.Match(pattern, FamiliarName(namedRef))
	}
	return matched, err
}
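For context, a hypothetical sketch (not part of this diff) of the helpers above; the image references are examples only:

// familiar_match.go: illustrative sketch only.
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("docker.io/library/ubuntu:20.04")
	if err != nil {
		panic(err)
	}

	// FamiliarString strips the default "docker.io/library/" prefix.
	fmt.Println(reference.FamiliarString(named)) // ubuntu:20.04

	// FamiliarMatch applies path.Match patterns against the familiar form.
	ok, _ := reference.FamiliarMatch("ubuntu*", named)
	fmt.Println(ok) // true
}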
vendor/github.com/docker/distribution/reference/normalize.go (generated, vendored, new file, 170 lines)
@@ -0,0 +1,170 @@
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digestset"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
legacyDefaultDomain = "index.docker.io"
|
||||
defaultDomain = "docker.io"
|
||||
officialRepoName = "library"
|
||||
defaultTag = "latest"
|
||||
)
|
||||
|
||||
// normalizedNamed represents a name which has been
|
||||
// normalized and has a familiar form. A familiar name
|
||||
// is what is used in Docker UI. An example normalized
|
||||
// name is "docker.io/library/ubuntu" and corresponding
|
||||
// familiar name of "ubuntu".
|
||||
type normalizedNamed interface {
|
||||
Named
|
||||
Familiar() Named
|
||||
}
|
||||
|
||||
// ParseNormalizedNamed parses a string into a named reference
|
||||
// transforming a familiar name from Docker UI to a fully
|
||||
// qualified reference. If the value may be an identifier
|
||||
// use ParseAnyReference.
|
||||
func ParseNormalizedNamed(s string) (Named, error) {
|
||||
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
|
||||
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
|
||||
}
|
||||
domain, remainder := splitDockerDomain(s)
|
||||
var remoteName string
|
||||
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
|
||||
remoteName = remainder[:tagSep]
|
||||
} else {
|
||||
remoteName = remainder
|
||||
}
|
||||
if strings.ToLower(remoteName) != remoteName {
|
||||
return nil, errors.New("invalid reference format: repository name must be lowercase")
|
||||
}
|
||||
|
||||
ref, err := Parse(domain + "/" + remainder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
named, isNamed := ref.(Named)
|
||||
if !isNamed {
|
||||
return nil, fmt.Errorf("reference %s has no name", ref.String())
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// splitDockerDomain splits a repository name to domain and remotename string.
|
||||
// If no valid domain is found, the default domain is used. Repository name
|
||||
// needs to be already validated before.
|
||||
func splitDockerDomain(name string) (domain, remainder string) {
|
||||
i := strings.IndexRune(name, '/')
|
||||
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
|
||||
domain, remainder = defaultDomain, name
|
||||
} else {
|
||||
domain, remainder = name[:i], name[i+1:]
|
||||
}
|
||||
if domain == legacyDefaultDomain {
|
||||
domain = defaultDomain
|
||||
}
|
||||
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
|
||||
remainder = officialRepoName + "/" + remainder
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// familiarizeName returns a shortened version of the name familiar
|
||||
// to the Docker UI. Familiar names have the default domain
|
||||
// "docker.io" and "library/" repository prefix removed.
|
||||
// For example, "docker.io/library/redis" will have the familiar
|
||||
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
|
||||
// Returns a familiarized named only reference.
|
||||
func familiarizeName(named namedRepository) repository {
|
||||
repo := repository{
|
||||
domain: named.Domain(),
|
||||
path: named.Path(),
|
||||
}
|
||||
|
||||
if repo.domain == defaultDomain {
|
||||
repo.domain = ""
|
||||
// Handle official repositories which have the pattern "library/<official repo name>"
|
||||
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
|
||||
repo.path = split[1]
|
||||
}
|
||||
}
|
||||
return repo
|
||||
}
|
||||
|
||||
func (r reference) Familiar() Named {
|
||||
return reference{
|
||||
namedRepository: familiarizeName(r.namedRepository),
|
||||
tag: r.tag,
|
||||
digest: r.digest,
|
||||
}
|
||||
}
|
||||
|
||||
func (r repository) Familiar() Named {
|
||||
return familiarizeName(r)
|
||||
}
|
||||
|
||||
func (t taggedReference) Familiar() Named {
|
||||
return taggedReference{
|
||||
namedRepository: familiarizeName(t.namedRepository),
|
||||
tag: t.tag,
|
||||
}
|
||||
}
|
||||
|
||||
func (c canonicalReference) Familiar() Named {
|
||||
return canonicalReference{
|
||||
namedRepository: familiarizeName(c.namedRepository),
|
||||
digest: c.digest,
|
||||
}
|
||||
}
|
||||
|
||||
// TagNameOnly adds the default tag "latest" to a reference if it only has
|
||||
// a repo name.
|
||||
func TagNameOnly(ref Named) Named {
|
||||
if IsNameOnly(ref) {
|
||||
namedTagged, err := WithTag(ref, defaultTag)
|
||||
if err != nil {
|
||||
// Default tag must be valid, to create a NamedTagged
|
||||
// type with non-validated input the WithTag function
|
||||
// should be used instead
|
||||
panic(err)
|
||||
}
|
||||
return namedTagged
|
||||
}
|
||||
return ref
|
||||
}
|
||||
|
||||
// ParseAnyReference parses a reference string as a possible identifier,
|
||||
// full digest, or familiar name.
|
||||
func ParseAnyReference(ref string) (Reference, error) {
|
||||
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
|
||||
return digestReference("sha256:" + ref), nil
|
||||
}
|
||||
if dgst, err := digest.Parse(ref); err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
|
||||
return ParseNormalizedNamed(ref)
|
||||
}
|
||||
|
||||
// ParseAnyReferenceWithSet parses a reference string as a possible short
|
||||
// identifier to be matched in a digest set, a full digest, or familiar name.
|
||||
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
|
||||
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
|
||||
dgst, err := ds.Lookup(ref)
|
||||
if err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
} else {
|
||||
if dgst, err := digest.Parse(ref); err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
}
|
||||
|
||||
return ParseNormalizedNamed(ref)
|
||||
}
|
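For context, a hypothetical sketch (not part of this diff) of the normalization behaviour implemented above; the names are examples only:

// normalize_sketch.go: illustrative sketch only.
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A bare "redis" is expanded to the canonical "docker.io/library/redis"
	// by splitDockerDomain inside ParseNormalizedNamed.
	named, err := reference.ParseNormalizedNamed("redis")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.Name()) // docker.io/library/redis

	// TagNameOnly applies the default "latest" tag when none is present.
	tagged := reference.TagNameOnly(named)
	fmt.Println(tagged.String()) // docker.io/library/redis:latest
}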
vendor/github.com/docker/distribution/reference/reference.go (generated, vendored, new file, 433 lines)
@@ -0,0 +1,433 @@
|
||||
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [domain '/'] path-component ['/' path-component]*
|
||||
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
//
|
||||
// identifier := /[a-f0-9]{64}/
|
||||
// short-identifier := /[a-f0-9]{6,64}/
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
// NameTotalLengthMax is the maximum total number of characters in a repository name.
|
||||
NameTotalLengthMax = 255
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
|
||||
ErrReferenceInvalidFormat = errors.New("invalid reference format")
|
||||
|
||||
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrTagInvalidFormat = errors.New("invalid tag format")
|
||||
|
||||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
|
||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||
|
||||
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
|
||||
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
|
||||
|
||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||
|
||||
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
|
||||
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
|
||||
|
||||
// ErrNameNotCanonical is returned when a name is not canonical.
|
||||
ErrNameNotCanonical = errors.New("repository name must be canonical")
|
||||
)
|
||||
|
||||
// Reference is an opaque object reference identifier that may include
|
||||
// modifiers such as a hostname, name, tag, and digest.
|
||||
type Reference interface {
|
||||
// String returns the full reference
|
||||
String() string
|
||||
}
|
||||
|
||||
// Field provides a wrapper type for resolving correct reference types when
|
||||
// working with encoding.
|
||||
type Field struct {
|
||||
reference Reference
|
||||
}
|
||||
|
||||
// AsField wraps a reference in a Field for encoding.
|
||||
func AsField(reference Reference) Field {
|
||||
return Field{reference}
|
||||
}
|
||||
|
||||
// Reference unwraps the reference type from the field to
|
||||
// return the Reference object. This object should be
|
||||
// of the appropriate type to further check for different
|
||||
// reference types.
|
||||
func (f Field) Reference() Reference {
|
||||
return f.reference
|
||||
}
|
||||
|
||||
// MarshalText serializes the field to byte text which
|
||||
// is the string of the reference.
|
||||
func (f Field) MarshalText() (p []byte, err error) {
|
||||
return []byte(f.reference.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses text bytes by invoking the
|
||||
// reference parser to ensure the appropriately
|
||||
// typed reference object is wrapped by field.
|
||||
func (f *Field) UnmarshalText(p []byte) error {
|
||||
r, err := Parse(string(p))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.reference = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// Named is an object with a full name
|
||||
type Named interface {
|
||||
Reference
|
||||
Name() string
|
||||
}
|
||||
|
||||
// Tagged is an object which has a tag
|
||||
type Tagged interface {
|
||||
Reference
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// NamedTagged is an object including a name and tag.
|
||||
type NamedTagged interface {
|
||||
Named
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// Digested is an object which has a digest
|
||||
// in which it can be referenced by
|
||||
type Digested interface {
|
||||
Reference
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// Canonical reference is an object with a fully unique
|
||||
// name including a name with domain and digest
|
||||
type Canonical interface {
|
||||
Named
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// namedRepository is a reference to a repository with a name.
|
||||
// A namedRepository has both domain and path components.
|
||||
type namedRepository interface {
|
||||
Named
|
||||
Domain() string
|
||||
Path() string
|
||||
}
|
||||
|
||||
// Domain returns the domain part of the Named reference
|
||||
func Domain(named Named) string {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain()
|
||||
}
|
||||
domain, _ := splitDomain(named.Name())
|
||||
return domain
|
||||
}
|
||||
|
||||
// Path returns the name without the domain part of the Named reference
|
||||
func Path(named Named) (name string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Path()
|
||||
}
|
||||
_, path := splitDomain(named.Name())
|
||||
return path
|
||||
}
|
||||
|
||||
func splitDomain(name string) (string, string) {
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if len(match) != 3 {
|
||||
return "", name
|
||||
}
|
||||
return match[1], match[2]
|
||||
}
|
||||
|
||||
// SplitHostname splits a named reference into a
|
||||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
// DEPRECATED: Use Domain or Path
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain(), r.Path()
|
||||
}
|
||||
return splitDomain(named.Name())
|
||||
}
|
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
    matches := ReferenceRegexp.FindStringSubmatch(s)
    if matches == nil {
        if s == "" {
            return nil, ErrNameEmpty
        }
        if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
            return nil, ErrNameContainsUppercase
        }
        return nil, ErrReferenceInvalidFormat
    }

    if len(matches[1]) > NameTotalLengthMax {
        return nil, ErrNameTooLong
    }

    var repo repository

    nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
    if nameMatch != nil && len(nameMatch) == 3 {
        repo.domain = nameMatch[1]
        repo.path = nameMatch[2]
    } else {
        repo.domain = ""
        repo.path = matches[1]
    }

    ref := reference{
        namedRepository: repo,
        tag:             matches[2],
    }
    if matches[3] != "" {
        var err error
        ref.digest, err = digest.Parse(matches[3])
        if err != nil {
            return nil, err
        }
    }

    r := getBestReferenceType(ref)
    if r == nil {
        return nil, ErrNameEmpty
    }

    return r, nil
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
    named, err := ParseNormalizedNamed(s)
    if err != nil {
        return nil, err
    }
    if named.String() != s {
        return nil, ErrNameNotCanonical
    }
    return named, nil
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
    if len(name) > NameTotalLengthMax {
        return nil, ErrNameTooLong
    }

    match := anchoredNameRegexp.FindStringSubmatch(name)
    if match == nil || len(match) != 3 {
        return nil, ErrReferenceInvalidFormat
    }
    return repository{
        domain: match[1],
        path:   match[2],
    }, nil
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
    if !anchoredTagRegexp.MatchString(tag) {
        return nil, ErrTagInvalidFormat
    }
    var repo repository
    if r, ok := name.(namedRepository); ok {
        repo.domain = r.Domain()
        repo.path = r.Path()
    } else {
        repo.path = name.Name()
    }
    if canonical, ok := name.(Canonical); ok {
        return reference{
            namedRepository: repo,
            tag:             tag,
            digest:          canonical.Digest(),
        }, nil
    }
    return taggedReference{
        namedRepository: repo,
        tag:             tag,
    }, nil
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
    if !anchoredDigestRegexp.MatchString(digest.String()) {
        return nil, ErrDigestInvalidFormat
    }
    var repo repository
    if r, ok := name.(namedRepository); ok {
        repo.domain = r.Domain()
        repo.path = r.Path()
    } else {
        repo.path = name.Name()
    }
    if tagged, ok := name.(Tagged); ok {
        return reference{
            namedRepository: repo,
            tag:             tagged.Tag(),
            digest:          digest,
        }, nil
    }
    return canonicalReference{
        namedRepository: repo,
        digest:          digest,
    }, nil
}

// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
    domain, path := SplitHostname(ref)
    return repository{
        domain: domain,
        path:   path,
    }
}

func getBestReferenceType(ref reference) Reference {
    if ref.Name() == "" {
        // Allow digest only references
        if ref.digest != "" {
            return digestReference(ref.digest)
        }
        return nil
    }
    if ref.tag == "" {
        if ref.digest != "" {
            return canonicalReference{
                namedRepository: ref.namedRepository,
                digest:          ref.digest,
            }
        }
        return ref.namedRepository
    }
    if ref.digest == "" {
        return taggedReference{
            namedRepository: ref.namedRepository,
            tag:             ref.tag,
        }
    }

    return ref
}

type reference struct {
    namedRepository
    tag    string
    digest digest.Digest
}

func (r reference) String() string {
    return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

func (r reference) Tag() string {
    return r.tag
}

func (r reference) Digest() digest.Digest {
    return r.digest
}

type repository struct {
    domain string
    path   string
}

func (r repository) String() string {
    return r.Name()
}

func (r repository) Name() string {
    if r.domain == "" {
        return r.path
    }
    return r.domain + "/" + r.path
}

func (r repository) Domain() string {
    return r.domain
}

func (r repository) Path() string {
    return r.path
}

type digestReference digest.Digest

func (d digestReference) String() string {
    return digest.Digest(d).String()
}

func (d digestReference) Digest() digest.Digest {
    return digest.Digest(d)
}

type taggedReference struct {
    namedRepository
    tag string
}

func (t taggedReference) String() string {
    return t.Name() + ":" + t.tag
}

func (t taggedReference) Tag() string {
    return t.tag
}

type canonicalReference struct {
    namedRepository
    digest digest.Digest
}

func (c canonicalReference) String() string {
    return c.Name() + "@" + c.digest.String()
}

func (c canonicalReference) Digest() digest.Digest {
    return c.digest
}
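For orientation only (this sketch is not part of the vendored diff): a minimal program using the reference package above. The import path is the upstream module path assumed by this vendor directory, and the image name is a made-up example.

package main

import (
    "fmt"

    "github.com/docker/distribution/reference" // assumed vendored import path
)

func main() {
    // Parse accepts name[:tag][@digest]; tag and digest are optional.
    ref, err := reference.Parse("registry.example.com/team/app:v1.2.3")
    if err != nil {
        panic(err)
    }
    if tagged, ok := ref.(reference.NamedTagged); ok {
        fmt.Println(tagged.Name(), tagged.Tag()) // registry.example.com/team/app v1.2.3
    }
}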
143 vendor/github.com/docker/distribution/reference/regexp.go (generated, vendored, new file)
@@ -0,0 +1,143 @@
package reference

import "regexp"

var (
    // alphaNumericRegexp defines the alpha numeric atom, typically a
    // component of names. This only allows lower case characters and digits.
    alphaNumericRegexp = match(`[a-z0-9]+`)

    // separatorRegexp defines the separators allowed to be embedded in name
    // components. This allow one period, one or two underscore and multiple
    // dashes.
    separatorRegexp = match(`(?:[._]|__|[-]*)`)

    // nameComponentRegexp restricts registry path component names to start
    // with at least one letter or number, with following parts able to be
    // separated by one period, one or two underscore and multiple dashes.
    nameComponentRegexp = expression(
        alphaNumericRegexp,
        optional(repeated(separatorRegexp, alphaNumericRegexp)))

    // domainComponentRegexp restricts the registry domain component of a
    // repository name to start with a component as defined by DomainRegexp
    // and followed by an optional port.
    domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

    // DomainRegexp defines the structure of potential domain components
    // that may be part of image names. This is purposely a subset of what is
    // allowed by DNS to ensure backwards compatibility with Docker image
    // names.
    DomainRegexp = expression(
        domainComponentRegexp,
        optional(repeated(literal(`.`), domainComponentRegexp)),
        optional(literal(`:`), match(`[0-9]+`)))

    // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
    TagRegexp = match(`[\w][\w.-]{0,127}`)

    // anchoredTagRegexp matches valid tag names, anchored at the start and
    // end of the matched string.
    anchoredTagRegexp = anchored(TagRegexp)

    // DigestRegexp matches valid digests.
    DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

    // anchoredDigestRegexp matches valid digests, anchored at the start and
    // end of the matched string.
    anchoredDigestRegexp = anchored(DigestRegexp)

    // NameRegexp is the format for the name component of references. The
    // regexp has capturing groups for the domain and name part omitting
    // the separating forward slash from either.
    NameRegexp = expression(
        optional(DomainRegexp, literal(`/`)),
        nameComponentRegexp,
        optional(repeated(literal(`/`), nameComponentRegexp)))

    // anchoredNameRegexp is used to parse a name value, capturing the
    // domain and trailing components.
    anchoredNameRegexp = anchored(
        optional(capture(DomainRegexp), literal(`/`)),
        capture(nameComponentRegexp,
            optional(repeated(literal(`/`), nameComponentRegexp))))

    // ReferenceRegexp is the full supported format of a reference. The regexp
    // is anchored and has capturing groups for name, tag, and digest
    // components.
    ReferenceRegexp = anchored(capture(NameRegexp),
        optional(literal(":"), capture(TagRegexp)),
        optional(literal("@"), capture(DigestRegexp)))

    // IdentifierRegexp is the format for string identifier used as a
    // content addressable identifier using sha256. These identifiers
    // are like digests without the algorithm, since sha256 is used.
    IdentifierRegexp = match(`([a-f0-9]{64})`)

    // ShortIdentifierRegexp is the format used to represent a prefix
    // of an identifier. A prefix may be used to match a sha256 identifier
    // within a list of trusted identifiers.
    ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

    // anchoredIdentifierRegexp is used to check or match an
    // identifier value, anchored at start and end of string.
    anchoredIdentifierRegexp = anchored(IdentifierRegexp)

    // anchoredShortIdentifierRegexp is used to check if a value
    // is a possible identifier prefix, anchored at start and end
    // of string.
    anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
    re := match(regexp.QuoteMeta(s))

    if _, complete := re.LiteralPrefix(); !complete {
        panic("must be a literal")
    }

    return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
    var s string
    for _, re := range res {
        s += re.String()
    }

    return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
    return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
    return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`^` + expression(res...).String() + `$`)
}
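A rough illustration (not part of the diff) of how the Parse function shown earlier uses these regexps; the three capture groups of ReferenceRegexp are the name, tag and digest. The sample string is an assumption.

package main

import (
    "fmt"

    "github.com/docker/distribution/reference" // assumed vendored import path
)

func main() {
    // Anchored match: group 1 = name, group 2 = tag, group 3 = digest (empty here).
    m := reference.ReferenceRegexp.FindStringSubmatch("example.com/app:latest")
    fmt.Println(m[1], m[2], m[3]) // "example.com/app" "latest" ""
}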
191 vendor/github.com/golang/glog/LICENSE (generated, vendored, new file)
@@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
44 vendor/github.com/golang/glog/README (generated, vendored, new file)
@@ -0,0 +1,44 @@
glog
====

Leveled execution logs for Go.

This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
    https://github.com/google/glog

By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.

The comment from glog.go introduces the ideas:

    Package glog implements logging analogous to the Google-internal
    C++ INFO/ERROR/V setup. It provides functions Info, Warning,
    Error, Fatal, plus formatting variants such as Infof. It
    also provides V-style logging controlled by the -v and
    -vmodule=file=2 flags.

    Basic examples:

        glog.Info("Prepare to repel boarders")

        glog.Fatalf("Initialization failed: %s", err)

    See the documentation for the V function for an explanation
    of these examples:

        if glog.V(2) {
            glog.Info("Starting transaction...")
        }

        glog.V(2).Infoln("Processed", nItems, "elements")


The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.

Send bug reports to golang-nuts@googlegroups.com.
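The README above only quotes fragments. As a self-contained sketch (not part of this diff), a program wired up for the vendored glog package might look like the following; flag.Parse is required so the -v and -logtostderr flags take effect.

package main

import (
    "flag"

    "github.com/golang/glog"
)

func main() {
    flag.Parse()
    defer glog.Flush() // flush buffered log lines before exiting

    glog.Info("starting up")
    if glog.V(2) {
        glog.Info("verbose details only shown with -v=2 or higher")
    }
    glog.Warningf("%d items skipped", 3)
}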
1180 vendor/github.com/golang/glog/glog.go (generated, vendored, new file)
File diff suppressed because it is too large.
124 vendor/github.com/golang/glog/glog_file.go (generated, vendored, new file)
@@ -0,0 +1,124 @@
|
||||
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
||||
//
|
||||
// Copyright 2013 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// File I/O for logs.
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MaxSize is the maximum size of a log file in bytes.
|
||||
var MaxSize uint64 = 1024 * 1024 * 1800
|
||||
|
||||
// logDirs lists the candidate directories for new log files.
|
||||
var logDirs []string
|
||||
|
||||
// If non-empty, overrides the choice of directory in which to write logs.
|
||||
// See createLogDirs for the full list of possible destinations.
|
||||
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
|
||||
|
||||
func createLogDirs() {
|
||||
if *logDir != "" {
|
||||
logDirs = append(logDirs, *logDir)
|
||||
}
|
||||
logDirs = append(logDirs, os.TempDir())
|
||||
}
|
||||
|
||||
var (
|
||||
pid = os.Getpid()
|
||||
program = filepath.Base(os.Args[0])
|
||||
host = "unknownhost"
|
||||
userName = "unknownuser"
|
||||
)
|
||||
|
||||
func init() {
|
||||
h, err := os.Hostname()
|
||||
if err == nil {
|
||||
host = shortHostname(h)
|
||||
}
|
||||
|
||||
current, err := user.Current()
|
||||
if err == nil {
|
||||
userName = current.Username
|
||||
}
|
||||
|
||||
// Sanitize userName since it may contain filepath separators on Windows.
|
||||
userName = strings.Replace(userName, `\`, "_", -1)
|
||||
}
|
||||
|
||||
// shortHostname returns its argument, truncating at the first period.
|
||||
// For instance, given "www.google.com" it returns "www".
|
||||
func shortHostname(hostname string) string {
|
||||
if i := strings.Index(hostname, "."); i >= 0 {
|
||||
return hostname[:i]
|
||||
}
|
||||
return hostname
|
||||
}
|
||||
|
||||
// logName returns a new log file name containing tag, with start time t, and
|
||||
// the name for the symlink for tag.
|
||||
func logName(tag string, t time.Time) (name, link string) {
|
||||
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
|
||||
program,
|
||||
host,
|
||||
userName,
|
||||
tag,
|
||||
t.Year(),
|
||||
t.Month(),
|
||||
t.Day(),
|
||||
t.Hour(),
|
||||
t.Minute(),
|
||||
t.Second(),
|
||||
pid)
|
||||
return name, program + "." + tag
|
||||
}
|
||||
|
||||
var onceLogDirs sync.Once
|
||||
|
||||
// create creates a new log file and returns the file and its filename, which
|
||||
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
|
||||
// successfully, create also attempts to update the symlink for that tag, ignoring
|
||||
// errors.
|
||||
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
|
||||
onceLogDirs.Do(createLogDirs)
|
||||
if len(logDirs) == 0 {
|
||||
return nil, "", errors.New("log: no log dirs")
|
||||
}
|
||||
name, link := logName(tag, t)
|
||||
var lastErr error
|
||||
for _, dir := range logDirs {
|
||||
fname := filepath.Join(dir, name)
|
||||
f, err := os.Create(fname)
|
||||
if err == nil {
|
||||
symlink := filepath.Join(dir, link)
|
||||
os.Remove(symlink) // ignore err
|
||||
os.Symlink(name, symlink) // ignore err
|
||||
return f, fname, nil
|
||||
}
|
||||
lastErr = err
|
||||
}
|
||||
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
|
||||
}
|
4 vendor/github.com/google/uuid/hash.go (generated, vendored)
@@ -26,8 +26,8 @@ var (
 // NewMD5 and NewSHA1.
 func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
     h.Reset()
-    h.Write(space[:])
-    h.Write(data)
+    h.Write(space[:]) //nolint:errcheck
+    h.Write(data)     //nolint:errcheck
     s := h.Sum(nil)
     var uuid UUID
     copy(uuid[:], s)
118 vendor/github.com/google/uuid/null.go (generated, vendored, new file)
@@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "bytes"
    "database/sql/driver"
    "encoding/json"
    "fmt"
)

var jsonNull = []byte("null")

// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
//  var u uuid.NullUUID
//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
//  ...
//  if u.Valid {
//     // use u.UUID
//  } else {
//     // NULL value
//  }
//
type NullUUID struct {
    UUID  UUID
    Valid bool // Valid is true if UUID is not NULL
}

// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
    if value == nil {
        nu.UUID, nu.Valid = Nil, false
        return nil
    }

    err := nu.UUID.Scan(value)
    if err != nil {
        nu.Valid = false
        return err
    }

    nu.Valid = true
    return nil
}

// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
    if !nu.Valid {
        return nil, nil
    }
    // Delegate to UUID Value function
    return nu.UUID.Value()
}

// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
    if nu.Valid {
        return nu.UUID[:], nil
    }

    return []byte(nil), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
    if len(data) != 16 {
        return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
    }
    copy(nu.UUID[:], data)
    nu.Valid = true
    return nil
}

// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
    if nu.Valid {
        return nu.UUID.MarshalText()
    }

    return jsonNull, nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
    id, err := ParseBytes(data)
    if err != nil {
        nu.Valid = false
        return err
    }
    nu.UUID = id
    nu.Valid = true
    return nil
}

// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
    if nu.Valid {
        return json.Marshal(nu.UUID)
    }

    return jsonNull, nil
}

// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
    if bytes.Equal(data, jsonNull) {
        *nu = NullUUID{}
        return nil // valid null UUID
    }
    err := json.Unmarshal(data, &nu.UUID)
    nu.Valid = err == nil
    return err
}
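A rough usage sketch (not part of the vendored diff): NullUUID distinguishes a missing value from the zero UUID when decoding JSON or scanning from SQL. The sample UUID string is arbitrary.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/google/uuid"
)

func main() {
    var nu uuid.NullUUID

    // JSON null leaves Valid == false; a quoted UUID sets it to true.
    _ = json.Unmarshal([]byte(`null`), &nu)
    fmt.Println(nu.Valid) // false

    _ = json.Unmarshal([]byte(`"6ba7b810-9dad-11d1-80b4-00c04fd430c8"`), &nu)
    fmt.Println(nu.Valid, nu.UUID) // true 6ba7b810-9dad-11d1-80b4-00c04fd430c8
}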
2 vendor/github.com/google/uuid/sql.go (generated, vendored)
@@ -9,7 +9,7 @@ import (
     "fmt"
 )
 
-// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
 // Currently, database types that map to string and []byte are supported. Please
 // consult database-specific driver documentation for matching types.
 func (uuid *UUID) Scan(src interface{}) error {
55 vendor/github.com/google/uuid/uuid.go (generated, vendored)
@@ -12,6 +12,7 @@ import (
     "fmt"
     "io"
     "strings"
+    "sync"
 )
 
 // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@@ -33,7 +34,27 @@ const (
     Future // Reserved for future definition.
 )
 
-var rander = rand.Reader // random function
+const randPoolSize = 16 * 16
+
+var (
+    rander      = rand.Reader // random function
+    poolEnabled = false
+    poolMu      sync.Mutex
+    poolPos     = randPoolSize     // protected with poolMu
+    pool        [randPoolSize]byte // protected with poolMu
+)
+
+type invalidLengthError struct{ len int }
+
+func (err invalidLengthError) Error() string {
+    return fmt.Sprintf("invalid UUID length: %d", err.len)
+}
+
+// IsInvalidLengthError is matcher function for custom error invalidLengthError
+func IsInvalidLengthError(err error) bool {
+    _, ok := err.(invalidLengthError)
+    return ok
+}
 
 // Parse decodes s into a UUID or returns an error. Both the standard UUID
 // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
@@ -68,7 +89,7 @@ func Parse(s string) (UUID, error) {
         }
         return uuid, nil
     default:
-        return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+        return uuid, invalidLengthError{len(s)}
     }
     // s is now at least 36 bytes long
     // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -112,7 +133,7 @@ func ParseBytes(b []byte) (UUID, error) {
         }
         return uuid, nil
     default:
-        return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+        return uuid, invalidLengthError{len(b)}
     }
     // s is now at least 36 bytes long
     // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -243,3 +264,31 @@ func SetRand(r io.Reader) {
     }
     rander = r
 }
+
+// EnableRandPool enables internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+    poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+    poolEnabled = false
+    defer poolMu.Unlock()
+    poolMu.Lock()
+    poolPos = randPoolSize
+}
35 vendor/github.com/google/uuid/version4.go (generated, vendored)
@@ -14,11 +14,21 @@ func New() UUID {
     return Must(NewRandom())
 }
 
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+//    uuid.New().String()
+func NewString() string {
+    return Must(NewRandom()).String()
+}
+
 // NewRandom returns a Random (Version 4) UUID.
 //
 // The strength of the UUIDs is based on the strength of the crypto/rand
 // package.
 //
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
 // A note about uniqueness derived from the UUID Wikipedia entry:
 //
 // Randomly generated UUIDs have 122 random bits. One's annual risk of being
@@ -27,7 +37,10 @@ func New() UUID {
 // equivalent to the odds of creating a few tens of trillions of UUIDs in a
 // year and having one duplicate.
 func NewRandom() (UUID, error) {
-    return NewRandomFromReader(rander)
+    if !poolEnabled {
+        return NewRandomFromReader(rander)
+    }
+    return newRandomFromPool()
 }
 
 // NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
@@ -41,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
     uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
     return uuid, nil
 }
+
+func newRandomFromPool() (UUID, error) {
+    var uuid UUID
+    poolMu.Lock()
+    if poolPos == randPoolSize {
+        _, err := io.ReadFull(rander, pool[:])
+        if err != nil {
+            poolMu.Unlock()
+            return Nil, err
+        }
+        poolPos = 0
+    }
+    copy(uuid[:], pool[poolPos:(poolPos+16)])
+    poolPos += 16
+    poolMu.Unlock()
+
+    uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+    uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+    return uuid, nil
+}
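A short sketch (not part of the diff) of the new randomness-pool API introduced by this google/uuid bump, under the usual caveats from the doc comments above:

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    // Opt in to the batched randomness pool before any concurrent UUID
    // generation; skip it for security-sensitive workloads.
    uuid.EnableRandPool()

    for i := 0; i < 3; i++ {
        fmt.Println(uuid.NewString()) // NewString() == New().String()
    }
}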
25 vendor/github.com/gorilla/websocket/.gitignore (generated, vendored, new file)
@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

.idea/
*.iml
9 vendor/github.com/gorilla/websocket/AUTHORS (generated, vendored, new file)
@@ -0,0 +1,9 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.

Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>
22 vendor/github.com/gorilla/websocket/LICENSE (generated, vendored, new file)
@@ -0,0 +1,22 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

  Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 vendor/github.com/gorilla/websocket/README.md (generated, vendored, new file)
@@ -0,0 +1,64 @@
# Gorilla WebSocket

[](https://godoc.org/github.com/gorilla/websocket)
[](https://circleci.com/gh/gorilla/websocket)

Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.

### Documentation

* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)

### Status

The Gorilla WebSocket package provides a complete and tested implementation of
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
package API is stable.

### Installation

    go get github.com/gorilla/websocket

### Protocol Compliance

The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).

### Gorilla WebSocket compared with other packages

<table>
<tr>
<th></th>
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
</tr>
<tr>
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
<tr><td colspan="3">Other Features</tr></td>
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
</table>

Notes:

1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
2. The application can get the type of a received data message by implementing
  a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
  function.
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
  Read returns when the input buffer is full or a frame boundary is
  encountered. Each call to Write sends a single frame message. The Gorilla
  io.Reader and io.WriteCloser operate on a single WebSocket message.
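A minimal client sketch (not part of the vendored diff) showing the Dialer API that the client.go file below implements; the wss:// endpoint is a placeholder.

package main

import (
    "log"
    "time"

    "github.com/gorilla/websocket"
)

func main() {
    // Hypothetical echo endpoint; any ws:// or wss:// service works.
    dialer := websocket.Dialer{HandshakeTimeout: 10 * time.Second}

    conn, _, err := dialer.Dial("wss://echo.example.com/ws", nil)
    if err != nil {
        log.Fatal("dial:", err)
    }
    defer conn.Close()

    if err := conn.WriteMessage(websocket.TextMessage, []byte("ping")); err != nil {
        log.Fatal("write:", err)
    }
    _, msg, err := conn.ReadMessage()
    if err != nil {
        log.Fatal("read:", err)
    }
    log.Printf("received: %s", msg)
}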
395 vendor/github.com/gorilla/websocket/client.go (generated, vendored, new file)
@@ -0,0 +1,395 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, add string) (net.Conn, error)
|
||||
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
} else {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
var err error
|
||||
if trace != nil {
|
||||
err = doHandshakeWithTrace(trace, tlsConn, cfg)
|
||||
} else {
|
||||
err = doHandshake(tlsConn, cfg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
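The dial path above layers a handshake timeout, an optional proxy, TLS, and the Upgrade response checks on top of whatever network dial function the caller supplies. The sketch below is a minimal, illustrative way of driving it from application code; the endpoint URL and header values are placeholders, and only exported websocket.Dialer fields exercised by this function (HandshakeTimeout, Proxy, TLSClientConfig) are used.

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	// Placeholder endpoint; replace with a real wss:// URL.
	const endpoint = "wss://example.com/socket"

	d := &websocket.Dialer{
		HandshakeTimeout: 10 * time.Second,          // bounds the handshake via the context.WithTimeout seen above
		Proxy:            http.ProxyFromEnvironment, // exercises the proxy-wrapping branch
		TLSClientConfig:  &tls.Config{MinVersion: tls.VersionTLS12},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	conn, resp, err := d.DialContext(ctx, endpoint, http.Header{"Origin": []string{"https://example.com"}})
	if err != nil {
		// On a failed upgrade, resp (when non-nil) carries the slurped response body for debugging.
		log.Fatalf("dial failed: %v (resp: %+v)", err, resp)
	}
	defer conn.Close()
}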
16 vendor/github.com/gorilla/websocket/client_clone.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "crypto/tls"

func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}
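cloneTLSConfig is unexported, but on Go 1.8+ the idiom it wraps is simply a nil-safe (*tls.Config).Clone. The sketch below (safeCloneTLS is a hypothetical name, not part of this package) shows why the guard matters: the caller can clone and then mutate freely without touching a config that may be nil or shared.

package main

import "crypto/tls"

// safeCloneTLS mirrors the idiom above: a nil config yields a fresh
// zero-value config instead of a nil pointer.
func safeCloneTLS(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}

func main() {
	var cfg *tls.Config // possibly nil, e.g. a TLSClientConfig left unset
	cloned := safeCloneTLS(cfg)
	cloned.ServerName = "example.com" // safe: cloned is never nil and never shared
}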
38 vendor/github.com/gorilla/websocket/client_clone_legacy.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

import "crypto/tls"

// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return &tls.Config{
		Rand:                     cfg.Rand,
		Time:                     cfg.Time,
		Certificates:             cfg.Certificates,
		NameToCertificate:        cfg.NameToCertificate,
		GetCertificate:           cfg.GetCertificate,
		RootCAs:                  cfg.RootCAs,
		NextProtos:               cfg.NextProtos,
		ServerName:               cfg.ServerName,
		ClientAuth:               cfg.ClientAuth,
		ClientCAs:                cfg.ClientCAs,
		InsecureSkipVerify:       cfg.InsecureSkipVerify,
		CipherSuites:             cfg.CipherSuites,
		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
		ClientSessionCache:       cfg.ClientSessionCache,
		MinVersion:               cfg.MinVersion,
		MaxVersion:               cfg.MaxVersion,
		CurvePreferences:         cfg.CurvePreferences,
	}
}
148 vendor/github.com/gorilla/websocket/compression.go generated vendored Normal file
@@ -0,0 +1,148 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"compress/flate"
	"errors"
	"io"
	"strings"
	"sync"
)

const (
	minCompressionLevel     = -2 // flate.HuffmanOnly not defined in Go < 1.6
	maxCompressionLevel     = flate.BestCompression
	defaultCompressionLevel = 1
)

var (
	flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
	flateReaderPool  = sync.Pool{New: func() interface{} {
		return flate.NewReader(nil)
	}}
)

func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
	const tail =
	// Add four bytes as specified in RFC
	"\x00\x00\xff\xff" +
		// Add final block to squelch unexpected EOF error from flate reader.
		"\x01\x00\x00\xff\xff"

	fr, _ := flateReaderPool.Get().(io.ReadCloser)
	fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
	return &flateReadWrapper{fr}
}

func isValidCompressionLevel(level int) bool {
	return minCompressionLevel <= level && level <= maxCompressionLevel
}

func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
	p := &flateWriterPools[level-minCompressionLevel]
	tw := &truncWriter{w: w}
	fw, _ := p.Get().(*flate.Writer)
	if fw == nil {
		fw, _ = flate.NewWriter(tw, level)
	} else {
		fw.Reset(tw)
	}
	return &flateWriteWrapper{fw: fw, tw: tw, p: p}
}

// truncWriter is an io.Writer that writes all but the last four bytes of the
// stream to another io.Writer.
type truncWriter struct {
	w io.WriteCloser
	n int
	p [4]byte
}

func (w *truncWriter) Write(p []byte) (int, error) {
	n := 0

	// fill buffer first for simplicity.
	if w.n < len(w.p) {
		n = copy(w.p[w.n:], p)
		p = p[n:]
		w.n += n
		if len(p) == 0 {
			return n, nil
		}
	}

	m := len(p)
	if m > len(w.p) {
		m = len(w.p)
	}

	if nn, err := w.w.Write(w.p[:m]); err != nil {
		return n + nn, err
	}

	copy(w.p[:], w.p[m:])
	copy(w.p[len(w.p)-m:], p[len(p)-m:])
	nn, err := w.w.Write(p[:len(p)-m])
	return n + nn, err
}

type flateWriteWrapper struct {
	fw *flate.Writer
	tw *truncWriter
	p  *sync.Pool
}

func (w *flateWriteWrapper) Write(p []byte) (int, error) {
	if w.fw == nil {
		return 0, errWriteClosed
	}
	return w.fw.Write(p)
}

func (w *flateWriteWrapper) Close() error {
	if w.fw == nil {
		return errWriteClosed
	}
	err1 := w.fw.Flush()
	w.p.Put(w.fw)
	w.fw = nil
	if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
		return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
	}
	err2 := w.tw.w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

type flateReadWrapper struct {
	fr io.ReadCloser
}

func (r *flateReadWrapper) Read(p []byte) (int, error) {
	if r.fr == nil {
		return 0, io.ErrClosedPipe
	}
	n, err := r.fr.Read(p)
	if err == io.EOF {
		// Preemptively place the reader back in the pool. This helps with
		// scenarios where the application does not call NextReader() soon after
		// this final read.
		r.Close()
	}
	return n, err
}

func (r *flateReadWrapper) Close() error {
	if r.fr == nil {
		return io.ErrClosedPipe
	}
	err := r.fr.Close()
	flateReaderPool.Put(r.fr)
	r.fr = nil
	return err
}
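These helpers implement the no-context-takeover flavor of permessage-deflate that the client handshake above negotiates. The following is a hedged sketch of enabling it from application code, assuming the public gorilla/websocket surface (Dialer.EnableCompression, Conn.EnableWriteCompression, Conn.SetCompressionLevel) and a placeholder endpoint.

package main

import (
	"compress/flate"
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

func main() {
	d := &websocket.Dialer{
		// Ask the server for permessage-deflate; compressNoContextTakeover and
		// decompressNoContextTakeover above are wired up only if the server
		// agrees to no-context-takeover on both sides.
		EnableCompression: true,
	}

	// Placeholder endpoint; replace with a real wss:// URL.
	conn, _, err := d.Dial("wss://example.com/socket", http.Header{})
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	// Per-message write compression can still be toggled and tuned at runtime.
	conn.EnableWriteCompression(true)
	if err := conn.SetCompressionLevel(flate.BestSpeed); err != nil {
		log.Fatalf("invalid compression level: %v", err)
	}

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatalf("write failed: %v", err)
	}
}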
1201 vendor/github.com/gorilla/websocket/conn.go generated vendored Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff