Compare commits

...

120 Commits

Author SHA1 Message Date
Marton Soos
209bc7cd17 Add crds for connect, 1password items and permissions 2022-04-15 12:28:59 +02:00
Marton Soos
493b311564 Merge branch 'main' into feature/migrate-operator-sdk-dependency 2022-04-15 12:00:50 +02:00
Marton Soos
e39cff881d Migrate main.go 2022-04-15 11:44:32 +02:00
Jillian W
0796b9c5e2 Merge pull request #105 from 1Password/release/v1.4.1
Prepare Release - v1.4.1
2022-04-12 13:49:26 -03:00
Joris Coenen
37a0f4b51e Prepare release v1.4.1 2022-04-12 18:46:06 +02:00
Joris Coenen
004e0101ff Merge pull request #104 from 1Password/secret-annotations
Stop copying annotations from OnePasswordItem and Deployment to Secret
2022-04-12 18:22:24 +02:00
Joris Coenen
6326a856ae Fix test
Annotations are no longer copied from the deployment to the secret,
so the test should not assert that the secret has a name annotation.
2022-04-12 10:41:11 +02:00
Joris Coenen
1ddf92c5a0 Merge branch 'main' into secret-annotations 2022-04-12 10:15:32 +02:00
Joris Coenen
f5c6fa5860 Merge pull request #103 from 1Password/owner-reference-item-update
Persist OwnerReferences when item is updated
2022-04-12 10:14:21 +02:00
Joris Coenen
afa076d321 Stop copying annotations from OnePasswordItem and Deployment to Secret
There is no reason for random annotations to be carried over. This
can lead to weird problems like the `kubectl.kubernetes.io/last-applied-configuration`
annotation ending up on a Secret.
2022-04-11 15:55:28 +02:00
Joris Coenen
d4b04c233c Add missing error checks 2022-04-11 12:12:58 +02:00
Joris Coenen
ea68cfc2b4 Persist OwnerReferences when 1Password item is updated 2022-04-11 12:12:58 +02:00
Marton Soos
1a085562e4 Migrate controllers and helper code from pkg 2022-04-11 10:06:24 +02:00
Marton Soos
21111fec90 Migrate OnePasswordItem resource 2022-04-11 09:12:37 +02:00
Marton Soos
69cc7cedb0 Add OnePasswordItem resource
operator-sdk create api --group onepassword --version v1 --kind OnePasswordItem --resource --controller
2022-04-11 09:06:29 +02:00
Marton Soos
b30c6130f7 Initial commit 2022-04-11 09:01:03 +02:00
Jillian W
58b4ff8ecf Merge pull request #99 from 1Password/release/v1.4.0
preparing release 1.4.0
2022-04-07 11:59:20 -03:00
jillianwilson
d93fecdc76 preparing release 1.4.0 2022-04-07 10:23:12 -03:00
Jillian W
486465247d Merge pull request #97 from slok/slok/owner-references
Add owner reference to the created secrets by the operator
2022-04-07 09:19:54 -03:00
Xabier Larrakoetxea
79868ae374 Add owner reference to the created secrets
Signed-off-by: Xabier Larrakoetxea <me@slok.dev>
2022-04-05 20:31:42 +02:00
Marton Soos
6286f7e306 Merge pull request #54 from mcmarkj/secret-path-updates
Deal with itemPath's changing
2022-03-28 15:33:34 +02:00
Marton Soos
0b5efc8690 Merge branch 'main' into secret-path-updates 2022-03-28 15:30:46 +02:00
Marton Soos
c00baeedcb Merge pull request #94 from 1Password/release/v1.3.0
Prepare Release - v1.3.0
2022-03-25 15:51:18 +01:00
Marton Soos
a37bddbfd9 Update release notes 2022-03-25 14:58:07 +01:00
Marton Soos
bd9922f635 Merge pull request #93 from 1Password/feature/file-support
Support loading file fields into Kubernetes secrets
2022-03-24 18:59:28 +01:00
Marton Soos
8fa4413880 Update readme to mention that non-file fields take precedence over file-fields 2022-03-24 18:00:24 +01:00
Marton Soos
62e55a3f19 Update tests and mock client 2022-03-24 12:13:34 +01:00
Marton Soos
d6f7b80c40 Log a message if a file on an item is ignored due to a field with the same name 2022-03-24 11:56:33 +01:00
Marton Soos
a903f9b1af Also add file data to kubernetes secrets 2022-03-24 11:37:24 +01:00
Jillian W
5cddc9d8a9 Merge pull request #92 from 1Password/feature/fix-readme-command
Fix wrong command in readme
2022-03-23 15:54:19 -03:00
Marton Soos
7e1b94fae7 Fix wrong command in readme 2022-03-23 19:36:57 +01:00
Marton Soos
6953a89c89 Merge pull request #91 from AlexVanderbist/patch-1
Fix typos in readme (auto reset -> auto restart)
2022-03-21 12:29:24 +01:00
Marton Soos
0d9e07f543 Update go sdk version to v1.2.0 2022-03-21 11:19:49 +01:00
Alex Vanderbist
098d504d2a Fix typos in readme (auto reset -> auto restart) 2022-03-16 18:21:00 +01:00
Marton Soos
b68d9a5d79 Merge pull request #89 from 1Password/release/v1.2.0
Prepare Release - v1.2.0
2022-02-21 12:25:44 +01:00
Marton Soos
befcaae457 Fix typo in changelog 2022-02-21 11:49:14 +01:00
Marton Soos
b24aa48bd6 Add release notes for v1.2.0 2022-02-21 11:03:14 +01:00
Marton Soos
b1e251dee6 Merge pull request #74 from Nuglif/main
Verify secrets and FromEnv in addition to Env
2022-02-18 20:13:08 +01:00
Marton Soos
a34c6e8b38 Merge pull request #87 from 1Password/feature/kubernetes-secret-types
Feature: Support configuring Kubernetes secret type
2022-02-18 14:16:22 +01:00
Marton Soos
b16960057a Update tests and add new test 2022-02-18 10:47:14 +01:00
Marton Soos
285496dc7e Error when secret type is changed 2022-02-18 10:27:48 +01:00
Marton Soos
f38cf7e1c2 Fix tests and add new test 2022-02-17 21:23:22 +01:00
Marton Soos
bb7a0c8ca9 Simplify secret type cast and default to Opaque 2022-02-17 19:36:49 +01:00
Marton Soos
302653832e Account for the fact that the '' type and Opaque are equivalent on secret comparison 2022-02-17 19:18:33 +01:00
Marton Soos
a1bcfdfdcb Merge branch 'main' into feature/kubernetes-secret-types 2022-02-17 17:54:17 +01:00
Floris van der Grinten
c0f1632638 Merge pull request #72 from samifruit514/main
More logging if 1password item cant be read and continue processing other items
2021-11-18 13:39:34 +01:00
Floris van der Grinten
c46065fa7a Merge branch 'main' into samifruit514/main 2021-11-18 13:29:55 +01:00
Andres Montalban
5d229c42d5 feat: Allow configuration of the Kubernetes Secret type to be created 2021-11-18 08:32:55 -03:00
Joris Coenen
c7235b4f09 Merge pull request #49 from FabioAntunes/patch-1
Update README.md
2021-10-04 12:33:02 +02:00
Joris Coenen
5183fc129a Merge branch 'main' into patch-1 2021-10-04 12:29:48 +02:00
David Gunter
7d619165b2 Merge pull request #76 from Klaudioz/patch-1
Removing $ from bash commands
2021-09-30 09:26:03 -07:00
Claudio Canales
0363ae1e4e Removing $ from bash commands
Using the copy button is bringing the commands with a $, which is giving the error `-bash: $: command not found` after pasting them to the console.
2021-09-29 16:16:45 -03:00
Samuel Archambault
d9e003bdb7 cleanup comments 2021-09-24 14:02:46 -04:00
Samuel Archambault
b25f943b3a Verify secrets and FromEnv in addition to Env 2021-09-24 13:51:05 -04:00
Samuel Archambault
5fab662424 More logging if 1password item cant be read and continue processing others 2021-09-24 11:03:47 -04:00
David Gunter
d807e92c36 Merge pull request #71 from 1Password/release/v1.1.0
Prepare Release - v1.1.0
2021-09-23 11:21:16 -07:00
david.gunter
244771717c Prepare release/1.1.0 2021-09-23 11:18:53 -07:00
mcmarkj
a760e524ea Merge branch 'main' of github.com:1Password/onepassword-operator into secret-path-updates 2021-09-13 13:28:25 +01:00
Floris van der Grinten
7aeb36e383 Merge pull request #66 from 1Password/fix/handling-key-names
Handling key names
2021-09-13 13:34:44 +02:00
Floris van der Grinten
5c2f840623 Merge pull request #43 from mcmarkj/pass-labels-and-annotations
Add Labels & Annotations from OPObject to Secret
2021-09-13 13:33:38 +02:00
Eddy Filip
670040477e Add max length for secret key names
Max length for secret key names must be DNS1123 compliant (253)
2021-09-08 16:02:08 +03:00
Eddy Filip
a45a310611 Make secret names DNS1123 Subdomain compiant
This is done while ensuring that secret keys are compliant (contain alphanumeric characters, `-`, `_` and `.`)
2021-09-08 15:36:40 +03:00
Eddy Filip
d80e8dd799 Add tests with names that contain . and _ 2021-09-08 13:58:48 +03:00
Eddy Filip
88728909ff Adjust regex to support _ and . and trim them
Now secret names can also contain `_` and `.` and they will be trimmed from start and end of string to be DNS1123 compliant
2021-09-08 13:49:32 +03:00
Marton Soos
e365ebfdfa Fix tests 2021-09-03 15:42:02 +03:00
Marton Soos
2c4b4df01a Do not make secret names lowercase on normalization 2021-09-03 15:41:46 +03:00
Jillian W
49d984c6f2 Merge pull request #64 from 1Password/release/v1.0.2
Preparing release
2021-08-27 15:32:40 -03:00
jillianwilson
72cad7284c Preparing release 2021-08-27 15:21:07 -03:00
mcmarkj
19f774bb2d Merge branch 'main' of github.com:1Password/onepassword-operator into secret-path-updates 2021-08-19 16:17:57 +01:00
mcmarkj
0193a98681 Merge branch 'main' of github.com:1Password/onepassword-operator into pass-labels-and-annotations 2021-08-19 16:15:02 +01:00
mcmarkj
f241d7423d Use deepequal 2021-08-19 16:11:29 +01:00
Eduard Filip
6043e0da0b Merge pull request #58 from 1Password/dg/normalize-secret-name
Add secret name normalizer to the operator.
2021-08-17 20:28:07 +02:00
Eddy Filip
753cc5e9a3 Update note in README
The note now explains how the item title and fields are made into DNS subdomain-compliant names for creating Kubernetes secrets.
2021-08-16 15:24:04 +02:00
Eddy Filip
8cfe98073e Improve testing
Fix previous tests and add test for items with field names that are not valid DNS subdomain names.
2021-08-16 14:51:44 +02:00
mcmarkj
c0037526b0 remove commit file 2021-08-15 15:32:18 +01:00
david.gunter
96b42e7c52 Label normalizer now fixes both Secret names and data keys.
Each key in the `data` section of a secret must also be a valid DNS subdomain. The operator needs to "fix" the 1Password item fields before trying to create the secret.
2021-08-06 13:18:21 -07:00
david.gunter
579b5848da Add secret name normalizer to the operator.
The operator will now reformat 1Password item names to become valid names K8s Secret objects. Secret names must be a valid DNS subdomain name. See more: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
2021-08-05 16:39:55 -07:00
mcmarkj
dff934cbc3 Fix tests 2021-08-04 06:33:56 +01:00
mcmarkj
2096f4440f add logic for checking for label or annotation updates 2021-08-03 21:32:04 +01:00
mcmarkj
b3fc707337 Merge branch 'main' of github.com:1Password/onepassword-operator into pass-labels-and-annotations 2021-07-23 15:29:24 +01:00
mcmarkj
32643651d9 Fix tests 2021-07-23 15:08:44 +01:00
mcmarkj
ba8d3fa698 Lookup the vaultPath for secrets to check for updates 2021-07-23 13:32:15 +01:00
mcmarkj
c57aa22a9c Update if in the poller 2021-07-22 08:18:52 +01:00
mcmarkj
48944b0d56 Deal with item paths changing 2021-07-22 07:11:50 +01:00
Fábio Antunes
313cd1169b Update README.md
Minor update to the README. Got me debugging for a few hours
2021-07-02 10:23:28 +01:00
Eduard Filip
b50d864b50 Merge pull request #46 from 1Password/connect-deploy-custom-namespace
Add support custom namespace for connect deployment
2021-06-17 14:13:43 +03:00
Eduard Filip
1643385d9b Merge pull request #45 from 1Password/fix/makefile
Add missing argument for docker build
2021-06-17 14:13:11 +03:00
Eddy Filip
9441214733 Add support custom namespace for connect deployment
Now when the operator is deployed with the `MANAGE_CONNECT` env var set to true, the connect instance is deployed in the same namespace as the operator.
2021-06-09 20:45:33 +03:00
Eddy Filip
7e4e988813 Add missing argument for docker build
`make build` and `make build/local` would fail because the docker build commands were incomplete.
2021-06-09 16:02:05 +03:00
mcmarkj
fb1262f1bd PR Feedback' 2021-06-07 21:51:44 +01:00
Simon Barendse
68f084080e Merge pull request #40 from 1Password/feature/watch-namespaces-default
Watch all namespaces by default
2021-06-07 14:25:48 +02:00
mcmarkj
a428fe7462 GoFMT 2021-05-28 18:15:17 +01:00
mcmarkj
ea2d1f8a09 Typo 2021-05-28 18:11:10 +01:00
mcmarkj
bd96d50a9b Add Labels & Annotations from OPObject to Secret 2021-05-28 16:39:00 +01:00
Simon Barendse
859c9e3462 Watch all namespaces by default
When nothing is configured, watch all namespaces by default. This
makes it easier to get started.

It also makes configuring to watch all namespaces less akward
(currently you have to set the WATCH_NAMESPACE environment variable
to the empty string to configure the operator to watch all namespaces.
2021-05-14 14:26:30 +02:00
Simon Barendse
9dabac4a55 Merge pull request #35 from 1Password/auto-restart-annotation-example
Fix examples using the auto-restart annotation
2021-05-07 11:33:54 +02:00
Simon Barendse
d927a08790 Fix examples using the auto-restart annotation 2021-05-03 18:14:02 +02:00
Simon Barendse
933f7c4e2c Merge pull request #33 from lemichello/readme-token-name
Make token name used in README and deploy/operator.yaml consistent
2021-05-03 18:04:07 +02:00
Floris van der Grinten
81eb9a521f Merge pull request #34 from 1Password/support-links
Update documentation links
2021-05-03 18:02:09 +02:00
Simon Barendse
eb32bd7f94 Update documentation links
Also switch b5dev.com to 1password.com
2021-05-03 16:59:32 +02:00
Simon Barendse
a5781af949 Update documentation links
The documentation is moved to our main support site when the
operator was publicly released. The old URLs are redirected to
the new URLs used in this commit, however, when redirecting the
anchor is lost and the page is not scrolled to that position on
the page. This commit fixes that by changing the URLs to the new
URLs.
2021-05-03 16:56:49 +02:00
Maksym Lemich
0aa5781acd renamed the proposed secret name for the token 2021-05-03 14:07:59 +03:00
Joris Coenen
700be4426f Merge pull request #31 from 1Password/armv7-image
Add arm/v7 image
2021-04-30 17:43:07 +02:00
Joris Coenen
76ef9aa372 Merge pull request #30 from 1Password/fix-cli-version
Fix the version passed to the image
2021-04-30 16:39:29 +02:00
Joris Coenen
d7e6704314 Add arm/v7 image
Needed to run on a Raspberry Pi.

Arm/v6 would also be nice, but this does not seem to be supported by the current base image gcr.io/distroless/static:nonroot. So let's go with this for now.
2021-04-30 16:39:05 +02:00
Joris Coenen
2443979602 Fix the version passed to the image
Contrary to what internet resources say, ${{github.event.ref}} also contains the `ref/tags/` prefix. That is removed now.

Also, setting the version with plain "-X version.Version" does not seem to work consistently. Adding the full package as a prefix fixes this.
2021-04-30 16:12:45 +02:00
Joris Coenen
5b65196d31 Merge pull request #29 from 1Password/release/v1.0.1
Release v1.0.1
2021-04-30 14:31:35 +02:00
Joris Coenen
e7df8a485d Fix inconsistency in .VERSION file 2021-04-30 14:28:36 +02:00
Joris Coenen
ded76138da Prepare release v1.0.1 2021-04-30 14:24:33 +02:00
Joris Coenen
a5db6aeb81 Merge pull request #24 from 1Password/go-binaries-action
Create GitHub Actions workflow to release to Docker Hub
2021-04-30 11:15:33 +02:00
Joris Coenen
d45f682c37 Rename job to release-docker
Co-authored-by: Floris van der Grinten <floris.vandergrinten@agilebits.com>
2021-04-29 14:35:21 +02:00
Joris Coenen
d0c1235e58 Remove obsoleted goreleaser files 2021-04-23 18:45:06 +02:00
Joris Coenen
9e8f621020 Use docker buildx for building and pushing images
This has the benefit that every tag only shows up as one image. With goreleaser, multiple images were shipped
2021-04-23 18:40:15 +02:00
Joris Coenen
8dd7a28456 Merge pull request #26 from 1Password/issue-templates
Add GitHub issue templates
2021-04-22 18:38:29 +02:00
Joris Coenen
43b06dd7aa Add GitHub issue templates 2021-04-22 13:38:35 +02:00
Joris Coenen
e8e01d6578 Also push :latest tag 2021-04-21 19:06:13 +02:00
Joris Coenen
b53e017b77 GitHub Action steps for publishing images to DockerHub 2021-04-21 18:41:30 +02:00
Joris Coenen
b2565cebf8 Add GoReleaser configuration for publishing docker images
Should build both an amd64 and arm64 image and combine both in a single manifest. Does require some modifications to the GitHub Actions to correctly push to DockerHub.

Used this blog post as inspiration: https://carlosbecker.com/posts/multi-platform-docker-images-goreleaser-gh-actions/
2021-04-21 18:18:47 +02:00
Joris Coenen
9459d2e292 Merge pull request #25 from 1Password/readme-update
Minor README adjustments
2021-04-21 10:50:48 +02:00
jillianwilson
0409b17ef4 Minor README adjustments 2021-04-20 16:18:59 -03:00
2692 changed files with 2879 additions and 888233 deletions


@@ -1 +1 @@
v1.0.0
1.4.0

4
.dockerignore Normal file

@@ -0,0 +1,4 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
testbin/

36
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@@ -0,0 +1,36 @@
---
name: Bug report
about: Report bugs and errors found while using the Operator.
title: ''
labels: bug
assignees: ''
---
### Your environment
<!-- Version of the Operator when the error occurred -->
Operator Version:
<!-- What version of the Connect server are you running?
You can get this information from the Integrations section in 1Password
https://start.1password.com/integrations/active
-->
Connect Server Version:
<!-- What version of Kubernetes have you deployed the operator to? -->
Kubernetes Version:
## What happened?
<!-- Describe the bug or error -->
## What did you expect to happen?
<!-- Describe what should have happened -->
## Steps to reproduce
1. <!-- Describe Steps to reproduce the issue -->
## Notes & Logs
<!-- Paste any logs here that may help with debugging.
Remember to remove any sensitive information before sharing! -->

9
.github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,9 @@
# docs: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: true
contact_links:
- name: 1Password Community
url: https://1password.community/categories/secrets-automation
about: Please ask general Secrets Automation questions here.
- name: 1Password Security Bug Bounty
url: https://bugcrowd.com/agilebits
about: Please report security vulnerabilities here.


@@ -0,0 +1,32 @@
---
name: Feature request
about: Suggest an idea for the Operator
title: ''
labels: feature-request
assignees: ''
---
### Summary
<!-- Briefly describe the feature in one or two sentences. You can include more details later. -->
### Use cases
<!-- Describe the use cases that make this feature useful to others.
The description should help the reader understand why the feature is necessary.
The better we understand your use case, the better we can help create an appropriate solution. -->
### Proposed solution
<!-- If you already have an idea for how the feature should work, use this space to describe it.
We'll work with you to find a workable approach, and any implementation details are appreciated.
-->
### Is there a workaround to accomplish this today?
<!-- If there's a way to accomplish this feature request without changes to the codebase, we'd like to hear it.
-->
### References & Prior Work
<!-- If a similar feature was implemented in another project or tool, add a link so we can better understand your request.
Links to relevant documentation or RFCs are also appreciated. -->
* <!-- Reference 1 -->
* <!-- Reference 2, etc -->


@@ -1,13 +1,15 @@
name: goreleaser
name: release
on:
push:
tags:
- '*'
- 'v*'
jobs:
goreleaser:
release-docker:
runs-on: ubuntu-latest
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
steps:
-
name: Checkout
@@ -15,15 +17,41 @@ jobs:
with:
fetch-depth: 0
-
name: Set up Go
uses: actions/setup-go@v2
name: Docker meta
id: meta
uses: crazy-max/ghaction-docker-meta@v2
with:
go-version: 1.15
images: |
1password/onepassword-operator
# Publish image for x.y.z and x.y
# The latest tag is automatically added for semver tags
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
- name: Get the version from tag
id: get_version
run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v}
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
-
name: Docker Login
uses: docker/login-action@v1
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: Dockerfile
platforms: linux/amd64,linux/arm64,linux/arm/v7
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
operator_version=${{ steps.get_version.outputs.VERSION }}

87
.gitignore vendored

@@ -1,80 +1,25 @@
# Temporary Build Files
build/_output
build/_test
# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
### Emacs ###
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
projectile-bookmarks.eld
# directory configuration
.dir-locals.el
# saveplace
places
# url cache
url/cache/
# cedet
ede-projects.el
# smex
smex-items
# company-statistics
company-statistics-cache.el
# anaconda-mode
anaconda-mode/
### Go ###
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, build with 'go test -c'
bin
testbin/*
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
### Vim ###
# swap
.sw[a-p]
.*.sw[a-p]
# session
Session.vim
# temporary
.netrwhist
# auto-generated tag files
tags
### VisualStudioCode ###
.vscode/*
.history
.DS_Store
op-ss-client/
.idea/
# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~


@@ -12,43 +12,124 @@
---
[//]: # (START/v1.0.0)
[//]: # "START/v1.4.1"
# v1.4.1
## Fixes
- OwnerReferences on secrets are now persisted after an item is updated. {#101}
- Annotations from a Deployment or OnePasswordItem are no longer applied to Secrets that are created for it. {#102}
---
[//]: # "START/v1.4.0"
# v1.4.0
## Features
- The operator now declares the an OwnerReference for the secrets it manages. This should stop secrets from getting pruned by tools like Argo CD. {#51,#84,#96}
---
[//]: # "START/v1.3.0"
# v1.3.0
## Features
- Added support for loading secrets from files stored in 1Password. {#47}
---
[//]: # "START/v1.2.0"
# v1.2.0
## Features
- Support secrets provisioned through FromEnv. {#74}
- Support configuration of Kubernetes Secret type. {#87}
- Improved logging. (#72)
---
[//]: # "START/v1.1.0"
# v1.1.0
## Fixes
- Fix normalization for keys in a Secret's `data` section to allow upper- and lower-case alphanumeric characters. {#66}
---
[//]: # "START/v1.0.2"
# v1.0.2
## Fixes
- Name normalizer added to handle non-conforming item names.
---
[//]: # "START/v1.0.1"
# v1.0.1
## Features
- This release also contains an arm64 Docker image. {#20}
- Docker images are also pushed to the :latest and :<major>.<minor> tags.
---
[//]: # "START/v1.0.0"
# v1.0.0
## Features:
* Option to automatically deploy 1Password Connect via the operator
* Ignore restart annotation when looking for 1Password annotations
* Release Automation
* Upgrading apiextensions.k8s.io/v1beta apiversion from the operator custom resource
* Adding configuration for auto rolling restart on deployments
* Configure Auto Restarts for a OnePasswordItem Custom Resource
* Update Connect Dependencies to latest
* Add Github action for building and testing operator
- Option to automatically deploy 1Password Connect via the operator
- Ignore restart annotation when looking for 1Password annotations
- Release Automation
- Upgrading apiextensions.k8s.io/v1beta apiversion from the operator custom resource
- Adding configuration for auto rolling restart on deployments
- Configure Auto Restarts for a OnePasswordItem Custom Resource
- Update Connect Dependencies to latest
- Add Github action for building and testing operator
## Fixes:
* Fix spec field example for OnePasswordItem in readme
* Casing of annotations are now consistent
- Fix spec field example for OnePasswordItem in readme
- Casing of annotations are now consistent
---
[//]: # (START/v0.0.2)
[//]: # "START/v0.0.2"
# v0.0.2
## Features:
* Items can now be accessed by either `vaults/<vault_id>/items/<item_id>` or `vaults/<vault_title>/items/<item_title>`
- Items can now be accessed by either `vaults/<vault_id>/items/<item_id>` or `vaults/<vault_title>/items/<item_title>`
---
[//]: # (START/v0.0.1)
[//]: # "START/v0.0.1"
# v0.0.1
Initial 1Password Operator release
## Features
* watches for deployment creations with `onepassword` annotations and creates an associated kubernetes secret
* watches for `onepasswordsecret` crd creations and creates an associated kubernetes secrets
* watches for changes to 1Password secrets associated with kubernetes secrets and updates the kubernetes secret with changes
* restart pods when secret has been updated
* cleanup of kubernetes secrets when deployment or `onepasswordsecret` is deleted
- watches for deployment creations with `onepassword` annotations and creates an associated kubernetes secret
- watches for `onepasswordsecret` crd creations and creates an associated kubernetes secrets
- watches for changes to 1Password secrets associated with kubernetes secrets and updates the kubernetes secret with changes
- restart pods when secret has been updated
- cleanup of kubernetes secrets when deployment or `onepasswordsecret` is deleted
---


@@ -1,33 +1,27 @@
# Build the manager binary
FROM golang:1.13 as builder
FROM golang:1.17 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY cmd/manager/main.go main.go
COPY pkg/ pkg/
COPY version/ version/
COPY vendor/ vendor/
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
# Build
ARG operator_version=dev
RUN CGO_ENABLED=0 \
GOOS=linux \
GOARCH=amd64 \
GO111MODULE=on \
go build \
-ldflags "-X version.Version=$operator_version" \
-mod vendor \
-a -o manager main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
COPY deploy/connect/ deploy/connect/
USER 65532:65532
ENTRYPOINT ["/manager"]

295
Makefile

@@ -1,68 +1,233 @@
export MAIN_BRANCH ?= main
# VERSION defines the project version for the bundle.
# Update this value when you upgrade the version of your project.
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.0.1
.DEFAULT_GOAL := help
.PHONY: test build build/binary build/local clean test/coverage release/prepare release/tag .check_bump_type .check_git_clean help
GIT_BRANCH := $(shell git symbolic-ref --short HEAD)
WORKTREE_CLEAN := $(shell git status --porcelain 1>/dev/null 2>&1; echo $$?)
SCRIPTS_DIR := $(CURDIR)/scripts
versionFile = $(CURDIR)/.VERSION
curVersion := $(shell cat $(versionFile) | sed 's/^v//')
OPERATOR_NAME := onepassword-connect-operator
DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
test: ## Run test suite
go test ./...
test/coverage: ## Run test suite with coverage report
go test -v ./... -cover
build: ## Build operator Docker image
@docker build -f Dockerfile --build-arg operator_version=$(curVersion) -t $(DOCKER_IMG_TAG)
@echo "Successfully built and tagged image."
@echo "Tag: $(DOCKER_IMG_TAG)"
build/local: ## Build local version of the operator Docker image
@docker build -f Dockerfile -t local/$(DOCKER_IMG_TAG)
build/binary: clean ## Build operator binary
@mkdir -p dist
@go build -mod vendor -a -o manager ./cmd/manager/main.go
@mv manager ./dist
clean:
rm -rf ./dist
help: ## Prints this help message
@grep -E '^[\/a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
## Release functions =====================
release/prepare: .check_git_clean ## Updates changelog and creates release branch (call with 'release/prepare version=<new_version_number>')
@test $(version) || (echo "[ERROR] version argument not set."; exit 1)
@git fetch --quiet origin $(MAIN_BRANCH)
@echo $(version) | tr -d '\n' | tee $(versionFile) &>/dev/null
@NEW_VERSION=$(version) $(SCRIPTS_DIR)/prepare-release.sh
release/tag: .check_git_clean ## Creates git tag
@git pull --ff-only
@echo "Applying tag 'v$(curVersion)' to HEAD..."
@git tag --sign "v$(curVersion)" -m "Release v$(curVersion)"
@echo "[OK] Success!"
@echo "Remember to call 'git push --tags' to persist the tag."
## Helper functions =====================
.check_git_clean:
ifneq ($(GIT_BRANCH), $(MAIN_BRANCH))
@echo "[ERROR] Please checkout default branch '$(MAIN_BRANCH)' and re-run this command."; exit 1;
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
# To re-generate a bundle for other specific channels without changing the standard setup, you can:
# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable)
# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable")
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif
ifneq ($(WORKTREE_CLEAN), 0)
@echo "[ERROR] Uncommitted changes found in worktree. Address them and try again."; exit 1;
# DEFAULT_CHANNEL defines the default channel used in the bundle.
# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
# To re-generate a bundle for any other default channel without changing the default setup, you can:
# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
# onepassword.com/onepassword-operator-new-bundle:$VERSION and onepassword.com/onepassword-operator-new-catalog:$VERSION.
IMAGE_TAG_BASE ?= onepassword.com/onepassword-operator-new
# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command
BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests
# You can enable this value if you would like to use SHA Based Digests
# To enable set flag to true
USE_IMAGE_DIGESTS ?= false
ifeq ($(USE_IMAGE_DIGESTS), true)
BUNDLE_GEN_FLAGS += --use-image-digests
endif
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.23
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
.PHONY: all
all: build
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk commands is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
.PHONY: fmt
fmt: ## Run go fmt against code.
go fmt ./...
.PHONY: vet
vet: ## Run go vet against code.
go vet ./...
.PHONY: test
test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
##@ Build
.PHONY: build
build: generate fmt vet ## Build manager binary.
go build -o bin/manager main.go
.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go
.PHONY: docker-build
docker-build: test ## Build docker image with the manager.
docker build -t ${IMG} .
.PHONY: docker-push
docker-push: ## Push docker image with the manager.
docker push ${IMG}
##@ Deployment
ifndef ignore-not-found
ignore-not-found = false
endif
.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
.PHONY: controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0)
KUSTOMIZE = $(shell pwd)/bin/kustomize
.PHONY: kustomize
kustomize: ## Download kustomize locally if necessary.
$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)
ENVTEST = $(shell pwd)/bin/setup-envtest
.PHONY: envtest
envtest: ## Download envtest-setup locally if necessary.
$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
# go-get-tool will 'go get' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-get-tool
@[ -f $(1) ] || { \
set -e ;\
TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef
.PHONY: bundle
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
operator-sdk generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS)
operator-sdk bundle validate ./bundle
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
.PHONY: bundle-push
bundle-push: ## Push the bundle image.
$(MAKE) docker-push IMG=$(BUNDLE_IMG)
.PHONY: opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
ifeq (,$(wildcard $(OPM)))
ifeq (,$(shell which opm 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPM)) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.19.1/$${OS}-$${ARCH}-opm ;\
chmod +x $(OPM) ;\
}
else
OPM = $(shell which opm)
endif
endif
# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
# These images MUST exist in a registry and be pull-able.
BUNDLE_IMGS ?= $(BUNDLE_IMG)
# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
ifneq ($(origin CATALOG_BASE_IMG), undefined)
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
endif
# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
$(MAKE) docker-push IMG=$(CATALOG_IMG)

19
PROJECT Normal file

@@ -0,0 +1,19 @@
domain: onepassword.com
layout:
- go.kubebuilder.io/v3
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: onepassword-operator-new
repo: github.com/1Password/onepassword-operator
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: onepassword.com
group: onepassword
kind: OnePasswordItem
path: github.com/1Password/onepassword-operator/api/v1
version: v1
version: "3"


@@ -1,3 +1,5 @@
// TODO: Update README.md
# 1Password Connect Kubernetes Operator
The 1Password Connect Kubernetes Operator provides the ability to integrate Kubernetes with 1Password. This Operator manages `OnePasswordItem` Custom Resource Definitions (CRDs) that define the location of an Item stored in 1Password. The `OnePasswordItem` CRD, when created, will be used to compose a Kubernetes Secret containing the contents of the specified item.
@@ -13,8 +15,8 @@ Prerequisites:
- [1Password Command Line Tool Installed](https://1password.com/downloads/command-line/)
- [kubectl installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
- [docker installed](https://docs.docker.com/get-docker/)
- [Generated a 1password-credentials.json file and issued a 1Password Connect API Token for the K8s Operator integration](https://support.b5dev.com/cs/connect)
- [1Password Connect deployed to Kubernetes](https://support.b5dev.com/cs/connect-deploy-kubernetes/#step-2-deploy-a-connect-server). **NOTE**: If customization of the 1Password Connect deployment is not required you can skip this prerequisite.
- [Generated a 1password-credentials.json file and issued a 1Password Connect API Token for the K8s Operator integration](https://support.1password.com/secrets-automation/)
- [1Password Connect deployed to Kubernetes](https://support.1password.com/connect-deploy-kubernetes/#step-2-deploy-a-1password-connect-server). **NOTE**: If customization of the 1Password Connect deployment is not required you can skip this prerequisite.
### Quickstart for Deploying 1Password Connect to Kubernetes
@@ -30,14 +32,13 @@ If 1Password Connect is already running, you can skip this step. This guide will
Encode the 1password-credentials.json file you generated in the prerequisite steps and save it to a file named op-session:
```bash
$ cat 1password-credentials.json | base64 | \
cat 1password-credentials.json | base64 | \
tr '/+' '_-' | tr -d '=' | tr -d '\n' > op-session
```
Create a Kubernetes secret from the op-session file:
```bash
$ kubectl create secret generic op-credentials --from-file=1password-credentials.json
kubectl create secret generic op-credentials --from-file=op-session
```
Add the following environment variable to the onepassword-connect-operator container in `deploy/operator.yaml`:
@@ -53,28 +54,28 @@ Adding this environment variable will have the operator automatically deploy a d
"Create a Connect token for the operator and save it as a Kubernetes Secret:
```bash
$ kubectl create secret generic op-operator-connect-token --from-literal=token=<OP_CONNECT_TOKEN>"
kubectl create secret generic onepassword-token --from-literal=token=<OP_CONNECT_TOKEN>"
```
If you do not have a token for the operator, you can generate a token and save it to kubernetes with the following command:
```bash
$ kubectl create secret generic op-operator-connect-token --from-literal=token=$(op create connect token <server> op-k8s-operator --vault <vault>)
kubectl create secret generic onepassword-token --from-literal=token=$(op create connect token <server> op-k8s-operator --vault <vault>)
```
[More information on generating a token can be found here](https://support.1password.com/cs/secrets-automation/#appendix-issue-additional-access-tokens)
[More information on generating a token can be found here](https://support.1password.com/secrets-automation/#appendix-issue-additional-access-tokens)
**Set Permissions For Operator**
We must create a service account, role, and role binding and Kubernetes. Examples can be found in the `/deploy` folder.
```bash
$ kubectl apply -f deploy/permissions.yaml
kubectl apply -f deploy/permissions.yaml
```
**Create Custom One Password Secret Resource**
```bash
$ kubectl apply -f deploy/crds/onepassword.com_onepassworditems_crd.yaml
kubectl apply -f deploy/crds/onepassword.com_onepassworditems_crd.yaml
```
**Deploying the Operator**
@@ -84,9 +85,9 @@ An sample Deployment yaml can be found at `/deploy/operator.yaml`.
To further configure the 1Password Kubernetes Operator the Following Environment variables can be set in the operator yaml:
- **WATCH_NAMESPACE:** comma separated list of what Namespaces to watch for changes.
- **OP_CONNECT_HOST** (required): Specifies the host name within Kubernetes in which to access the 1Password Connect.
- **POLLING_INTERVAL** (default: 600)**:** The number of seconds the 1Password Kubernetes Operator will wait before checking for updates from 1Password Connect.
- **WATCH_NAMESPACE:** (default: watch all namespaces): Comma separated list of what Namespaces to watch for changes.
- **POLLING_INTERVAL** (default: 600): The number of seconds the 1Password Kubernetes Operator will wait before checking for updates from 1Password Connect.
- **MANAGE_CONNECT** (default: false): If set to true, on deployment of the operator, a default configuration of the OnePassword Connect Service will be deployed to the `default` namespace.
- **AUTO_RESTART** (default: false): If set to true, the operator will restart any deployment using a secret from 1Password Connect. This can be overwritten by namespace, deployment, or individual secret. More details on AUTO_RESTART can be found in the ["Configuring Automatic Rolling Restarts of Deployments"](#configuring-automatic-rolling-restarts-of-deployments) section.
@@ -102,7 +103,7 @@ To create a Kubernetes Secret from a 1Password item, create a yaml file with the
```yaml
apiVersion: onepassword.com/v1
kind: OnePasswordItem # {insert_new_name}
kind: OnePasswordItem
metadata:
name: <item_name> #this name will also be used for naming the generated kubernetes secret
spec:
@@ -112,13 +113,13 @@ spec:
Deploy the OnePasswordItem to Kubernetes:
```bash
$ kubectl apply -f <your_item>.yaml
kubectl apply -f <your_item>.yaml
```
To test that the Kubernetes Secret check that the following command returns a secret:
```bash
$ kubectl get secret <secret_name>
kubectl get secret <secret_name>
```
Note: Deleting the `OnePasswordItem` that you've created will automatically delete the created Kubernetes Secret.
@@ -131,12 +132,17 @@ kind: Deployment
metadata:
name: deployment-example
annotations:
operator.1password.io/item-path: "vaults/{vault_id_or_title}/items/{item_id_or_title}"
operator.1password.io/item-name: "{secret_name}"
operator.1password.io/item-path: "vaults/<vault_id_or_title>/items/<item_id_or_title>"
operator.1password.io/item-name: "<secret_name>"
```
Applying this yaml file will create a Kubernetes Secret with the name `<secret_name>` and contents from the location specified at the specified Item Path.
The contents of the Kubernetes secret will be key-value pairs in which the keys are the fields of the 1Password item and the values are the corresponding values stored in 1Password.
In case of fields that store files, the file's contents will be used as the value.
Within an item, if both a field storing a file and a field of another type have the same name, the file field will be ignored and the other field will take precedence.
Note: Deleting the Deployment that you've created will automatically delete the created Kubernetes Secret only if the deployment is still annotated with `operator.1password.io/item-path` and `operator.1password.io/item-name` and no other deployment is using the secret.
If a 1Password Item that is linked to a Kubernetes Secret is updated within the POLLING_INTERVAL the associated Kubernetes Secret will be updated. However, if you do not want a specific secret to be updated you can add the tag `operator.1password.io:ignore-secret` to the item stored in 1Password. While this tag is in place, any updates made to an item will not trigger an update to the associated secret in Kubernetes.
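
As a side note, the precedence rule above can be sketched in a few lines of Go. This is a minimal illustration, not the operator's actual code; the `Field` and `File` types below are hypothetical stand-ins rather than the 1Password Connect SDK's types.

```go
package main

import "fmt"

// Field and File are hypothetical, minimal stand-ins for a 1Password item's
// fields and files; the operator itself works with the connect-sdk-go types.
type Field struct {
	Label string
	Value string
}

type File struct {
	Name    string
	Content []byte
}

// buildSecretData merges item fields and files into Secret data. A file is
// only added when no regular field already uses the same name, so non-file
// fields take precedence, as described above.
func buildSecretData(fields []Field, files []File) map[string][]byte {
	data := map[string][]byte{}
	for _, field := range fields {
		data[field.Label] = []byte(field.Value)
	}
	for _, file := range files {
		if _, exists := data[file.Name]; exists {
			fmt.Printf("ignoring file %q: a field with the same name exists\n", file.Name)
			continue
		}
		data[file.Name] = file.Content
	}
	return data
}

func main() {
	fields := []Field{{Label: "password", Value: "hunter2"}}
	files := []File{
		{Name: "password", Content: []byte("ignored")},
		{Name: "ca.crt", Content: []byte("-----BEGIN CERTIFICATE-----")},
	}
	fmt.Println(buildSecretData(fields, files)) // only ca.crt comes from a file
}
```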
@@ -144,7 +150,12 @@ If a 1Password Item that is linked to a Kubernetes Secret is updated within the
---
**NOTE**
If multiple 1Password vaults/items have the same `title` when using a title in the access path, the desired action will be performed on the oldest vault/item. Furthermore, titles that include white space characters cannot be used.
If multiple 1Password vaults/items have the same `title` when using a title in the access path, the desired action will be performed on the oldest vault/item.
Titles and field names that include white space and other characters that are not a valid [DNS subdomain name](https://kubernetes.io/docs/concepts/configuration/secret/) will create Kubernetes secrets that have titles and fields in the following format:
- Invalid characters before the first alphanumeric character and after the last alphanumeric character will be removed
- All whitespaces between words will be replaced by `-`
- All the letters will be lower-cased.
---
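
The normalization described in this note can be approximated with a short Go sketch. It is illustrative only, assuming the rules listed above (plus the earlier commits that allow `-`, `_` and `.` in key names); it is not the operator's actual normalizer.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	whitespace   = regexp.MustCompile(`\s+`)
	invalidChars = regexp.MustCompile(`[^a-z0-9\-_.]`)
	edgeTrim     = regexp.MustCompile(`^[^a-z0-9]+|[^a-z0-9]+$`)
)

// normalizeName lower-cases the input, replaces whitespace with '-', drops
// characters other than alphanumerics, '-', '_' and '.', and trims
// non-alphanumeric characters from both ends of the string.
func normalizeName(name string) string {
	s := strings.ToLower(name)
	s = whitespace.ReplaceAllString(s, "-")
	s = invalidChars.ReplaceAllString(s, "")
	return edgeTrim.ReplaceAllString(s, "")
}

func main() {
	fmt.Println(normalizeName("  My Database Credentials! ")) // my-database-credentials
}
```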
@@ -163,9 +174,10 @@ apiVersion: v1
kind: Namespace
metadata:
name: "example-namespace"
operator.1password.io/auto-restart: "true"
annotations:
operator.1password.io/auto-restart: "true"
```
If the value is not set, the auto reset settings on the operator will be used. This value can be overwritten by deployment.
If the value is not set, the auto restart settings on the operator will be used. This value can be overwritten by deployment.
**Per Deployment**
This method allows for managing auto restarts on a given deployment. Auto restarts can by managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired deployment. An example of this is shown below:
@@ -175,9 +187,10 @@ apiVersion: v1
kind: Deployment
metadata:
name: "example-deployment"
operator.1password.io/auto-restart: "true"
annotations:
operator.1password.io/auto-restart: "true"
```
If the value is not set, the auto reset settings on the namespace will be used.
If the value is not set, the auto restart settings on the namespace will be used.
**Per OnePasswordItem Custom Resource**
This method allows for managing auto restarts on a given OnePasswordItem custom resource. Auto restarts can by managed by setting the annotation `operator.1password.io/auto_restart` to either `true` or `false` on the desired OnePasswordItem. An example of this is shown below:
@@ -187,9 +200,10 @@ apiVersion: onepassword.com/v1
kind: OnePasswordItem
metadata:
name: example
operator.1password.io/auto-restart: "true"
annotations:
operator.1password.io/auto-restart: "true"
```
If the value is not set, the auto reset settings on the deployment will be used.
If the value is not set, the auto restart settings on the deployment will be used.
## Development
@@ -224,4 +238,4 @@ make test/coverage
Please file requests via [**BugCrowd**](https://bugcrowd.com/agilebits).
For information about security practices, please visit our [Security homepage](https://bugcrowd.com/agilebits).
For information about security practices, please visit our [Security homepage](https://bugcrowd.com/agilebits).


@@ -0,0 +1,36 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1 contains API Schema definitions for the onepassword v1 API group
//+kubebuilder:object:generate=true
//+groupName=onepassword.onepassword.com
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "onepassword.onepassword.com", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)


@@ -1,37 +1,57 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// OnePasswordItemSpec defines the desired state of OnePasswordItem
type OnePasswordItemSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Foo is an example field of OnePasswordItem. Edit onepassworditem_types.go to remove/update
ItemPath string `json:"itemPath,omitempty"`
}
// OnePasswordItemStatus defines the observed state of OnePasswordItem
type OnePasswordItemStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
// Important: Run "make" to regenerate code after modifying this file
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// OnePasswordItem is the Schema for the onepassworditems API
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=onepassworditems,scope=Namespaced
type OnePasswordItem struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Type string `json:"type,omitempty"`
Spec OnePasswordItemSpec `json:"spec,omitempty"`
Status OnePasswordItemStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//+kubebuilder:object:root=true
// OnePasswordItemList contains a list of OnePasswordItem
type OnePasswordItemList struct {


@@ -1,6 +1,23 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by operator-sdk. DO NOT EDIT.
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
@@ -15,7 +32,6 @@ func (in *OnePasswordItem) DeepCopyInto(out *OnePasswordItem) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItem.
@@ -48,7 +64,6 @@ func (in *OnePasswordItemList) DeepCopyInto(out *OnePasswordItemList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemList.
@@ -72,7 +87,6 @@ func (in *OnePasswordItemList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OnePasswordItemSpec) DeepCopyInto(out *OnePasswordItemSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemSpec.
@@ -88,7 +102,6 @@ func (in *OnePasswordItemSpec) DeepCopy() *OnePasswordItemSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OnePasswordItemStatus) DeepCopyInto(out *OnePasswordItemStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemStatus.


@@ -1,15 +0,0 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:latest
ENV OPERATOR=/usr/local/bin/onepassword-connect-operator \
USER_UID=1001 \
USER_NAME=onepassword-connect-operator
# install operator binary
COPY build/_output/bin/op-kubernetes-connect-operator ${OPERATOR}
COPY build/bin /usr/local/bin
RUN /usr/local/bin/user_setup
ENTRYPOINT ["/usr/local/bin/entrypoint"]
USER ${USER_UID}


@@ -1,3 +0,0 @@
#!/bin/sh -e
exec ${OPERATOR} $@


@@ -1,11 +0,0 @@
#!/bin/sh
set -x
# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd
mkdir -p "${HOME}"
chown "${USER_UID}:0" "${HOME}"
chmod ug+rwx "${HOME}"
# no need for this script to remain in the image after running
rm "$0"

View File

@@ -1,300 +0,0 @@
package main
import (
"context"
"errors"
"flag"
"fmt"
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/1Password/onepassword-operator/pkg/controller"
op "github.com/1Password/onepassword-operator/pkg/onepassword"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"github.com/1Password/onepassword-operator/pkg/apis"
"github.com/1Password/onepassword-operator/version"
"github.com/1Password/connect-sdk-go/connect"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
"github.com/operator-framework/operator-sdk/pkg/metrics"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)
const envPollingIntervalVariable = "POLLING_INTERVAL"
const manageConnect = "MANAGE_CONNECT"
const restartDeploymentsEnvVariable = "AUTO_RESTART"
const defaultPollingInterval = 600
// Change below variables to serve metrics on different host or port.
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")
func printVersion() {
log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}
func main() {
// Add the zap logger flag set to the CLI. The flag set must
// be added before calling pflag.Parse().
pflag.CommandLine.AddFlagSet(zap.FlagSet())
// Add flags registered by imported packages (e.g. glog and
// controller-runtime)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
// Use a zap logr.Logger implementation. If none of the zap
// flags are configured (or if the zap flag set is not being
// used), this defaults to a production zap logger.
//
// The logger instantiated here can be changed to any logger
// implementing the logr.Logger interface. This logger will
// be propagated through the whole operator, generating
// uniform and structured logs.
logf.SetLogger(zap.Logger())
printVersion()
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
log.Error(err, "Failed to get watch namespace")
os.Exit(1)
}
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "")
os.Exit(1)
}
ctx := context.Background()
// Become the leader before proceeding
err = leader.Become(ctx, "onepassword-connect-operator-lock")
if err != nil {
log.Error(err, "")
os.Exit(1)
}
// Set default manager options
options := manager.Options{
Namespace: namespace,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
}
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
if strings.Contains(namespace, ",") {
options.Namespace = ""
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
}
// Create a new manager to provide shared dependencies and start components
mgr, err := manager.New(cfg, options)
if err != nil {
log.Error(err, "")
os.Exit(1)
}
log.Info("Registering Components.")
// Setup Scheme for all resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "")
os.Exit(1)
}
//Setup 1PasswordConnect
if shouldManageConnect() {
log.Info("Automated Connect Management Enabled")
go func() {
connectStarted := false
for connectStarted == false {
err := op.SetupConnect(mgr.GetClient())
// Cache Not Started is an acceptable error. Retry until cache is started.
if err != nil && !errors.Is(err, &cache.ErrCacheNotStarted{}) {
log.Error(err, "")
os.Exit(1)
}
if err == nil {
connectStarted = true
}
}
}()
} else {
log.Info("Automated Connect Management Disabled")
}
// Setup One Password Client
opConnectClient, err := connect.NewClientFromEnvironment()
if err := controller.AddToManager(mgr, opConnectClient); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Add the Metrics Service
addMetrics(ctx, cfg)
// Setup update secrets task
updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
done := make(chan bool)
ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
go func() {
for {
select {
case <-done:
ticker.Stop()
return
case <-ticker.C:
updatedSecretsPoller.UpdateKubernetesSecretsTask()
}
}
}()
// Start the Cmd
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
log.Error(err, "Manager exited non-zero")
done <- true
os.Exit(1)
}
}
// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using
// the Prometheus operator
func addMetrics(ctx context.Context, cfg *rest.Config) {
// Get the namespace the operator is currently deployed in.
operatorNs, err := k8sutil.GetOperatorNamespace()
if err != nil {
if errors.Is(err, k8sutil.ErrRunLocal) {
log.Info("Skipping CR metrics server creation; not running in a cluster.")
return
}
}
if err := serveCRMetrics(cfg, operatorNs); err != nil {
log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
}
// Add to the below struct any other metrics ports you want to expose.
servicePorts := []v1.ServicePort{
{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
}
// Create Service object to expose the metrics port(s).
service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
if err != nil {
log.Info("Could not create metrics Service", "error", err.Error())
}
// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
// necessary to configure Prometheus to scrape metrics from this operator.
services := []*v1.Service{service}
// The ServiceMonitor is created in the same namespace where the operator is deployed
_, err = metrics.CreateServiceMonitors(cfg, operatorNs, services)
if err != nil {
log.Info("Could not create ServiceMonitor object", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}
}
// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config, operatorNs string) error {
// The function below returns a list of filtered operator/CR specific GVKs. For more control, override the GVK list below
// with your own custom logic. Note that if you are adding third party API schemas, probably you will need to
// customize this implementation to avoid permissions issues.
filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
if err != nil {
return err
}
// The metrics will be generated from the namespaces which are returned here.
// NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error.
ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs)
if err != nil {
return err
}
// Generate and serve custom resource specific metrics.
err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
if err != nil {
return err
}
return nil
}
func getPollingIntervalForUpdatingSecrets() time.Duration {
timeInSecondsString, found := os.LookupEnv(envPollingIntervalVariable)
if found {
timeInSeconds, err := strconv.Atoi(timeInSecondsString)
if err == nil {
return time.Duration(timeInSeconds) * time.Second
}
log.Info("Invalid value set for polling interval. Must be a valid integer.")
}
log.Info(fmt.Sprintf("Using default polling interval of %v seconds", defaultPollingInterval))
return time.Duration(defaultPollingInterval) * time.Second
}
func shouldManageConnect() bool {
shouldManageConnect, found := os.LookupEnv(manageConnect)
if found {
shouldManageConnectBool, err := strconv.ParseBool(strings.ToLower(shouldManageConnect))
if err != nil {
log.Error(err, "")
os.Exit(1)
}
return shouldManageConnectBool
}
return false
}
func shouldAutoRestartDeployments() bool {
shouldAutoRestartDeployments, found := os.LookupEnv(restartDeploymentsEnvVariable)
if found {
shouldAutoRestartDeploymentsBool, err := strconv.ParseBool(strings.ToLower(shouldAutoRestartDeployments))
if err != nil {
log.Error(err, "")
os.Exit(1)
}
return shouldAutoRestartDeploymentsBool
}
return false
}

View File

@@ -39,4 +39,7 @@ spec:
status:
description: OnePasswordItemStatus defines the observed state of OnePasswordItem
type: object
type:
description: 'Kubernetes secret type. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types'
type: string
type: object

View File

@@ -2,7 +2,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: onepassword-connect
namespace: default
spec:
selector:
matchLabels:

View File

@@ -2,7 +2,6 @@ apiVersion: v1
kind: Service
metadata:
name: onepassword-connect
namespace: default
spec:
type: NodePort
selector:

View File

@@ -0,0 +1,21 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/onepassword.onepassword.com_onepassworditems.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_onepassworditems.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_onepassworditems.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml

View File

@@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations

View File

@@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: onepassworditems.onepassword.onepassword.com

View File

@@ -0,0 +1,16 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: onepassworditems.onepassword.onepassword.com
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@@ -0,0 +1,74 @@
# Adds namespace to all resources.
namespace: onepassword-operator-new-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: onepassword-operator-new-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service

View File

@@ -0,0 +1,34 @@
# This patch injects a sidecar container that acts as an HTTP proxy for the
# controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=0"
ports:
- containerPort: 8443
protocol: TCP
name: https
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- name: manager
args:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"

View File

@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
args:
- "--config=controller_manager_config.yaml"
volumeMounts:
- name: manager-config
mountPath: /controller_manager_config.yaml
subPath: controller_manager_config.yaml
volumes:
- name: manager-config
configMap:
name: manager-config

View File

@@ -0,0 +1,11 @@
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
leaderElection:
leaderElect: true
resourceName: c26807fd.onepassword.com

View File

@@ -0,0 +1,10 @@
resources:
- manager.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- name: manager-config
files:
- controller_manager_config.yaml

View File

@@ -0,0 +1,60 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-manager
spec:
selector:
matchLabels:
control-plane: controller-manager
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
control-plane: controller-manager
spec:
securityContext:
runAsNonRoot: true
containers:
- command:
- /manager
args:
- --leader-elect
image: controller:latest
name: manager
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# TODO(user): Configure the resources accordingly based on the project requirements.
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10

View File

@@ -0,0 +1,27 @@
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
- bases/onepassword-operator-new.clusterserviceversion.yaml
- ../default
- ../samples
- ../scorecard
# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
# These patches remove the unnecessary "cert" volume and its manager container volumeMount.
#patchesJson6902:
#- target:
# group: apps
# version: v1
# kind: Deployment
# name: controller-manager
# namespace: system
# patch: |-
# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/containers/1/volumeMounts/0
# # Remove the "cert" volume, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing volumes in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/volumes/0

View File

@@ -0,0 +1,2 @@
resources:
- monitor.yaml

View File

@@ -0,0 +1,20 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-monitor
namespace: system
spec:
endpoints:
- path: /metrics
port: https
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
selector:
matchLabels:
control-plane: controller-manager

View File

@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
rules:
- nonResourceURLs:
- "/metrics"
verbs:
- get

View File

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
control-plane: controller-manager

View File

@@ -0,0 +1,18 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml

View File

@@ -0,0 +1,37 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@@ -0,0 +1,24 @@
# permissions for end users to edit onepassworditems.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: onepassworditem-editor-role
rules:
- apiGroups:
- onepassword.onepassword.com
resources:
- onepassworditems
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- onepassword.onepassword.com
resources:
- onepassworditems/status
verbs:
- get

View File

@@ -0,0 +1,20 @@
# permissions for end users to view onepassworditems.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: onepassworditem-viewer-role
rules:
- apiGroups:
- onepassword.onepassword.com
resources:
- onepassworditems
verbs:
- get
- list
- watch
- apiGroups:
- onepassword.onepassword.com
resources:
- onepassworditems/status
verbs:
- get

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: controller-manager
namespace: system

View File

@@ -0,0 +1,4 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- onepassword_v1_onepassworditem.yaml
#+kubebuilder:scaffold:manifestskustomizesamples

View File

@@ -0,0 +1,6 @@
apiVersion: onepassword.onepassword.com/v1
kind: OnePasswordItem
metadata:
name: onepassworditem-sample
spec:
# TODO(user): Add fields here

View File

@@ -0,0 +1,7 @@
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
name: config
stages:
- parallel: true
tests: []

View File

@@ -0,0 +1,16 @@
resources:
- bases/config.yaml
patchesJson6902:
- path: patches/basic.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
- path: patches/olm.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
#+kubebuilder:scaffold:patchesJson6902

View File

@@ -0,0 +1,10 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.19.0
labels:
suite: basic
test: basic-check-spec-test

View File

@@ -0,0 +1,50 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.19.0
labels:
suite: olm
test: olm-bundle-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.19.0
labels:
suite: olm
test: olm-crds-have-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.19.0
labels:
suite: olm
test: olm-crds-have-resources-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.19.0
labels:
suite: olm
test: olm-spec-descriptors-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.19.0
labels:
suite: olm
test: olm-status-descriptors-test

View File

@@ -1,4 +1,4 @@
package deployment
package controllers
import (
"context"
@@ -14,9 +14,11 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
@@ -25,7 +27,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("controller_deployment")
var deploymentLog = logf.Log.WithName("controller_deployment")
var finalizer = "onepassword.com/finalizer.secret"
const annotationRegExpString = "^operator.1password.io\\/[a-zA-Z\\.]+"
@@ -69,9 +71,23 @@ type ReconcileDeployment struct {
}
func (r *ReconcileDeployment) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&appsv1.Deployment{}).
Complete(r)
c, err := controller.New("deployment-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to primary resource Deployment
err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
return nil
// TODO figure out what to do with this code.
// return ctrl.NewControllerManagedBy(mgr).
// For(&appsv1.Deployment{}).
// Complete(r)
}
func (r *ReconcileDeployment) test() {
@@ -83,9 +99,8 @@ func (r *ReconcileDeployment) test() {
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileDeployment) Reconcile(request reconcile.Request) (reconcile.Result, error) {
ctx := context.Background()
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
func (r *ReconcileDeployment) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
reqLogger := deploymentLog.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
reqLogger.Info("Reconciling Deployment")
deployment := &appsv1.Deployment{}
@@ -114,7 +129,7 @@ func (r *ReconcileDeployment) Reconcile(request reconcile.Request) (reconcile.Re
}
}
// Handles creation or updating secrets for deployment if needed
if err := r.HandleApplyingDeployment(deployment.Namespace, annotations, request); err != nil {
if err := r.HandleApplyingDeployment(deployment, deployment.Namespace, annotations, request); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
@@ -168,7 +183,7 @@ func (r *ReconcileDeployment) areMultipleDeploymentsUsingSecret(updatedSecrets m
err := r.kubeClient.List(context.Background(), deployments, opts...)
if err != nil {
log.Error(err, "Failed to list kubernetes deployments")
deploymentLog.Error(err, "Failed to list kubernetes deployments")
return false, err
}
@@ -187,10 +202,13 @@ func (r *ReconcileDeployment) removeOnePasswordFinalizerFromDeployment(deploymen
return r.kubeClient.Update(context.Background(), deployment)
}
func (r *ReconcileDeployment) HandleApplyingDeployment(namespace string, annotations map[string]string, request reconcile.Request) error {
reqLog := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
func (r *ReconcileDeployment) HandleApplyingDeployment(deployment *appsv1.Deployment, namespace string, annotations map[string]string, request reconcile.Request) error {
reqLog := deploymentLog.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
secretName := annotations[op.NameAnnotation]
secretLabels := map[string]string(nil)
secretType := ""
if len(secretName) == 0 {
reqLog.Info("No 'item-name' annotation set. 'item-path' and 'item-name' must be set as annotations to add new secret.")
return nil
@@ -201,5 +219,17 @@ func (r *ReconcileDeployment) HandleApplyingDeployment(namespace string, annotat
return fmt.Errorf("Failed to retrieve item: %v", err)
}
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation])
// Create owner reference.
gvk, err := apiutil.GVKForObject(deployment, r.scheme)
if err != nil {
return fmt.Errorf("could not to retrieve group version kind: %v", err)
}
ownerRef := &metav1.OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: deployment.GetName(),
UID: deployment.GetUID(),
}
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation], secretLabels, secretType, annotations, ownerRef)
}
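// Illustrative sketch (not part of this diff): a Deployment annotated so that
// HandleApplyingDeployment above creates a Secret for it. The literal annotation
// keys are assumptions for illustration; the controller only references them via
// op.NameAnnotation and op.ItemPathAnnotation, and the vault/item IDs are placeholders.
func exampleAnnotatedDeployment() *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "example-app",
			Namespace: "default",
			Annotations: map[string]string{
				"operator.1password.io/item-path": "vaults/<vault-id>/items/<item-id>",
				"operator.1password.io/item-name": "example-app-secret",
			},
		},
	}
}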

View File

@@ -0,0 +1,183 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/1Password/onepassword-operator/pkg/onepassword"
op "github.com/1Password/onepassword-operator/pkg/onepassword"
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
kubeClient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/1Password/onepassword-operator/pkg/utils"
"github.com/1Password/connect-sdk-go/connect"
corev1 "k8s.io/api/core/v1"
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var log = logf.Log.WithName("controller_onepassworditem")
// OnePasswordItemReconciler reconciles a OnePasswordItem object
type OnePasswordItemReconciler struct {
Client kubeClient.Client
Scheme *runtime.Scheme
OpConnectClient connect.Client
}
//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems/finalizers,verbs=update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the OnePasswordItem object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
func (r *OnePasswordItemReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
reqLogger.Info("Reconciling OnePasswordItem")
onepassworditem := &onepasswordv1.OnePasswordItem{}
err := r.Client.Get(context.Background(), request.NamespacedName, onepassworditem)
if err != nil {
if errors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// If the OnePasswordItem is not being deleted
if onepassworditem.ObjectMeta.DeletionTimestamp.IsZero() {
// Adds a finalizer to the OnePasswordItem if one does not exist.
// This is so we can handle cleanup of the associated secret properly
if !utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
onepassworditem.ObjectMeta.Finalizers = append(onepassworditem.ObjectMeta.Finalizers, finalizer)
if err := r.Client.Update(context.Background(), onepassworditem); err != nil {
return reconcile.Result{}, err
}
}
// Handles creation or updating of the secret for this OnePasswordItem if needed
if err := r.HandleOnePasswordItem(onepassworditem, request); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// If the 1Password finalizer exists then we must clean up the associated secret
if utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
// Delete associated kubernetes secret
if err = r.cleanupKubernetesSecret(onepassworditem); err != nil {
return reconcile.Result{}, err
}
// Remove finalizer now that cleanup is complete
if err := r.removeFinalizer(onepassworditem); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *OnePasswordItemReconciler) SetupWithManager(mgr ctrl.Manager) error {
c, err := controller.New("onepassworditem-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to primary resource OnePasswordItem
err = c.Watch(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
return nil
// TODO Consider the simplified code below. Based on the migration guide: https://sdk.operatorframework.io/docs/building-operators/golang/migration/#create-a-new-project
// return ctrl.NewControllerManagedBy(mgr).Named("onepassworditem-controller").WithOptions(controller.Options{Reconciler: r}).
// For(&onepasswordv1.OnePasswordItem{}).Watches(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{}).
// Complete(r)
}
func (r *OnePasswordItemReconciler) removeFinalizer(onePasswordItem *onepasswordv1.OnePasswordItem) error {
onePasswordItem.ObjectMeta.Finalizers = utils.RemoveString(onePasswordItem.ObjectMeta.Finalizers, finalizer)
if err := r.Client.Update(context.Background(), onePasswordItem); err != nil {
return err
}
return nil
}
func (r *OnePasswordItemReconciler) cleanupKubernetesSecret(onePasswordItem *onepasswordv1.OnePasswordItem) error {
kubernetesSecret := &corev1.Secret{}
kubernetesSecret.ObjectMeta.Name = onePasswordItem.Name
kubernetesSecret.ObjectMeta.Namespace = onePasswordItem.Namespace
if err := r.Client.Delete(context.Background(), kubernetesSecret); err != nil {
if !errors.IsNotFound(err) {
return err
}
}
return nil
}
func (r *OnePasswordItemReconciler) HandleOnePasswordItem(resource *onepasswordv1.OnePasswordItem, request reconcile.Request) error {
secretName := resource.GetName()
labels := resource.Labels
annotations := resource.Annotations
secretType := resource.Type
autoRestart := annotations[op.RestartDeploymentsAnnotation]
item, err := onepassword.GetOnePasswordItemByPath(r.OpConnectClient, resource.Spec.ItemPath)
if err != nil {
return fmt.Errorf("Failed to retrieve item: %v", err)
}
// Create owner reference.
gvk, err := apiutil.GVKForObject(resource, r.Scheme)
if err != nil {
return fmt.Errorf("could not to retrieve group version kind: %v", err)
}
ownerRef := &metav1.OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: resource.GetName(),
UID: resource.GetUID(),
}
return kubeSecrets.CreateKubernetesSecretFromItem(r.Client, secretName, resource.Namespace, item, autoRestart, labels, secretType, annotations, ownerRef)
}
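// Illustrative sketch (not part of this diff): the kind of OnePasswordItem resource
// HandleOnePasswordItem consumes, expressed as a Go object. The name, namespace, and
// item path are placeholders; Type maps to the Kubernetes secret type exposed by the CRD.
func exampleOnePasswordItem() *onepasswordv1.OnePasswordItem {
	return &onepasswordv1.OnePasswordItem{
		ObjectMeta: metav1.ObjectMeta{Name: "example-credentials", Namespace: "default"},
		Type:       "Opaque",
		Spec: onepasswordv1.OnePasswordItemSpec{
			ItemPath: "vaults/<vault-id>/items/<item-id>",
		},
	}
}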

controllers/suite_test.go Normal file
View File

@@ -0,0 +1,80 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = onepasswordv1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

go.mod
View File

@@ -1,21 +1,81 @@
module github.com/1Password/onepassword-operator
go 1.13
go 1.17
require (
github.com/1Password/connect-sdk-go v1.0.1
github.com/operator-framework/operator-sdk v0.19.0
github.com/prometheus/common v0.14.0 // indirect
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.6.1
k8s.io/api v0.18.2
k8s.io/apimachinery v0.18.2
k8s.io/client-go v12.0.0+incompatible
k8s.io/kubectl v0.18.2
sigs.k8s.io/controller-runtime v0.6.0
github.com/1Password/connect-sdk-go v1.2.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.17.0
github.com/stretchr/testify v1.7.0
k8s.io/api v0.23.5
k8s.io/apimachinery v0.23.5
k8s.io/client-go v0.23.5
k8s.io/kubectl v0.23.5
sigs.k8s.io/controller-runtime v0.11.0
)
replace (
github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Required by OLM
k8s.io/client-go => k8s.io/client-go v0.18.2 // Required by prometheus-operator
require (
cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
github.com/uber/jaeger-lib v2.4.0+incompatible // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.1 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/apiextensions-apiserver v0.23.0 // indirect
k8s.io/component-base v0.23.5 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

go.sum

File diff suppressed because it is too large.

View File

@@ -1,4 +1,5 @@
Copyright 2011-2016 Canonical Ltd.
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -11,3 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

main.go Normal file
View File

@@ -0,0 +1,254 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"errors"
"flag"
"fmt"
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/1Password/connect-sdk-go/connect"
op "github.com/1Password/onepassword-operator/pkg/onepassword"
"github.com/1Password/onepassword-operator/pkg/utils"
"github.com/1Password/onepassword-operator/version"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
// sdkVersion "github.com/operator-framework/operator-sdk/version"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
"github.com/1Password/onepassword-operator/controllers"
//+kubebuilder:scaffold:imports
)
var (
scheme = k8sruntime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
WatchNamespaceEnvVar = "WATCH_NAMESPACE"
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(onepasswordv1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
func printVersion() {
setupLog.Info(fmt.Sprintf("Operator Version: %s", version.Version))
setupLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
// TODO figure out how to get operator-sdk version
// setupLog.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
printVersion()
namespace := os.Getenv(WatchNamespaceEnvVar)
options := ctrl.Options{
Scheme: scheme,
Namespace: namespace,
MetricsBindAddress: metricsAddr,
Port: 9443,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "c26807fd.onepassword.com",
}
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g. ns1,ns2)
// Note that this is not intended to be used for excluding namespaces; this is better done via a Predicate.
// Also note that you may face performance issues when using this with a high number of namespaces.
if strings.Contains(namespace, ",") {
options.Namespace = ""
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
// Setup One Password Client
opConnectClient, err := connect.NewClientFromEnvironment()
if err != nil {
setupLog.Error(err, "failed to create 1Password client")
os.Exit(1)
}
if err = (&controllers.OnePasswordItemReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
OpConnectClient: opConnectClient,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OnePasswordItem")
os.Exit(1)
}
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
deploymentNamespace, err := utils.GetOperatorNamespace()
if err != nil {
setupLog.Error(err, "Failed to get namespace")
os.Exit(1)
}
//Setup 1PasswordConnect
if shouldManageConnect() {
setupLog.Info("Automated Connect Management Enabled")
go func() {
connectStarted := false
for !connectStarted {
err := op.SetupConnect(mgr.GetClient(), deploymentNamespace)
// Cache Not Started is an acceptable error. Retry until cache is started.
if err != nil && !errors.Is(err, &cache.ErrCacheNotStarted{}) {
setupLog.Error(err, "")
os.Exit(1)
}
if err == nil {
connectStarted = true
}
}
}()
} else {
setupLog.Info("Automated Connect Management Disabled")
}
// TODO: Configure Metrics Service. See: https://sdk.operatorframework.io/docs/building-operators/golang/migration/#export-metrics
// Setup update secrets task
updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
done := make(chan bool)
ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
go func() {
for {
select {
case <-done:
ticker.Stop()
return
case <-ticker.C:
err := updatedSecretsPoller.UpdateKubernetesSecretsTask()
if err != nil {
setupLog.Error(err, "error running update kubernetes secret task")
}
}
}
}()
setupLog.Info("starting manager")
// Start the manager. This call blocks until the manager exits.
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
setupLog.Error(err, "Manager exited non-zero")
done <- true
os.Exit(1)
}
}
const manageConnect = "MANAGE_CONNECT"
func shouldManageConnect() bool {
shouldManageConnect, found := os.LookupEnv(manageConnect)
if found {
shouldManageConnectBool, err := strconv.ParseBool(strings.ToLower(shouldManageConnect))
if err != nil {
setupLog.Error(err, "")
os.Exit(1)
}
return shouldManageConnectBool
}
return false
}
const envPollingIntervalVariable = "POLLING_INTERVAL"
const defaultPollingInterval = 600
func getPollingIntervalForUpdatingSecrets() time.Duration {
timeInSecondsString, found := os.LookupEnv(envPollingIntervalVariable)
if found {
timeInSeconds, err := strconv.Atoi(timeInSecondsString)
if err == nil {
return time.Duration(timeInSeconds) * time.Second
}
setupLog.Info("Invalid value set for polling interval. Must be a valid integer.")
}
setupLog.Info(fmt.Sprintf("Using default polling interval of %v seconds", defaultPollingInterval))
return time.Duration(defaultPollingInterval) * time.Second
}
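// Illustrative sketch (not part of this diff): how getPollingIntervalForUpdatingSecrets
// reacts to the environment. The "120" value is hypothetical; an unset or non-integer
// value falls back to the 600-second default implemented above.
func examplePollingInterval() time.Duration {
	os.Setenv(envPollingIntervalVariable, "120") // hypothetical override for illustration
	return getPollingIntervalForUpdatingSecrets() // yields 2 minutes
}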
const restartDeploymentsEnvVariable = "AUTO_RESTART"
func shouldAutoRestartDeployments() bool {
shouldAutoRestartDeployments, found := os.LookupEnv(restartDeploymentsEnvVariable)
if found {
shouldAutoRestartDeploymentsBool, err := strconv.ParseBool(strings.ToLower(shouldAutoRestartDeployments))
if err != nil {
setupLog.Error(err, "")
os.Exit(1)
}
return shouldAutoRestartDeploymentsBool
}
return false
}

View File

@@ -1,10 +0,0 @@
package apis
import (
v1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
)
func init() {
// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme)
}

View File

@@ -1,13 +0,0 @@
package apis
import (
"k8s.io/apimachinery/pkg/runtime"
)
// AddToSchemes may be used to add all resources defined in the project to a Scheme
var AddToSchemes runtime.SchemeBuilder
// AddToScheme adds all Resources to the Scheme
func AddToScheme(s *runtime.Scheme) error {
return AddToSchemes.AddToScheme(s)
}

View File

@@ -1,6 +0,0 @@
// Package onepassword contains onepassword API versions.
//
// This file ensures Go source parsers acknowledge the onepassword package
// and any child packages. It can be removed if any other Go source files are
// added to this package.
package onepassword

View File

@@ -1,4 +0,0 @@
// Package v1 contains API Schema definitions for the onepassword v1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=onepassword.com
package v1

View File

@@ -1,19 +0,0 @@
// NOTE: Boilerplate only. Ignore this file.
// Package v1 contains API Schema definitions for the onepassword v1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=onepassword.com
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: "onepassword.com", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
)

View File

@@ -1,10 +0,0 @@
package controller
import (
"github.com/1Password/onepassword-operator/pkg/controller/deployment"
)
func init() {
// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
AddToManagerFuncs = append(AddToManagerFuncs, deployment.Add)
}

View File

@@ -1,10 +0,0 @@
package controller
import (
"github.com/1Password/onepassword-operator/pkg/controller/onepassworditem"
)
func init() {
// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
AddToManagerFuncs = append(AddToManagerFuncs, onepassworditem.Add)
}

View File

@@ -1,19 +0,0 @@
package controller
import (
"github.com/1Password/connect-sdk-go/connect"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
var AddToManagerFuncs []func(manager.Manager, connect.Client) error
// AddToManager adds all Controllers to the Manager
func AddToManager(m manager.Manager, opConnectClient connect.Client) error {
for _, f := range AddToManagerFuncs {
if err := f(m, opConnectClient); err != nil {
return err
}
}
return nil
}

View File

@@ -1,474 +0,0 @@
package deployment
import (
"context"
"fmt"
"regexp"
"testing"
"github.com/1Password/onepassword-operator/pkg/mocks"
op "github.com/1Password/onepassword-operator/pkg/onepassword"
"github.com/1Password/connect-sdk-go/onepassword"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
errors2 "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubectl/pkg/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
deploymentKind = "Deployment"
deploymentAPIVersion = "v1"
name = "test-deployment"
namespace = "default"
vaultId = "hfnjvi6aymbsnfc2xeeoheizda"
itemId = "nwrhuano7bcwddcviubpp4mhfq"
username = "test-user"
password = "QmHumKc$mUeEem7caHtbaBaJ"
userKey = "username"
passKey = "password"
version = 123
)
type testReconcileItem struct {
testName string
deploymentResource *appsv1.Deployment
existingSecret *corev1.Secret
expectedError error
expectedResultSecret *corev1.Secret
expectedEvents []string
opItem map[string]string
existingDeployment *appsv1.Deployment
}
var (
expectedSecretData = map[string][]byte{
"password": []byte(password),
"username": []byte(username),
}
itemPath = fmt.Sprintf("vaults/%v/items/%v", vaultId, itemId)
)
var (
time = metav1.Now()
regex, _ = regexp.Compile(annotationRegExpString)
)
var tests = []testReconcileItem{
{
testName: "Test Delete Deployment where secret is being used in another deployment's volumes",
deploymentResource: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
DeletionTimestamp: &time,
Finalizers: []string{
finalizer,
},
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
},
existingDeployment: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: "another-deployment",
Namespace: namespace,
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: name,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: name,
},
},
},
},
},
},
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
{
testName: "Test Delete Deployment where secret is being used in another deployment's container",
deploymentResource: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
DeletionTimestamp: &time,
Finalizers: []string{
finalizer,
},
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
},
existingDeployment: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: "another-deployment",
Namespace: namespace,
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Env: []corev1.EnvVar{
{
Name: name,
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: name,
},
Key: passKey,
},
},
},
},
},
},
},
},
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
{
testName: "Test Delete Deployment",
deploymentResource: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
DeletionTimestamp: &time,
Finalizers: []string{
finalizer,
},
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: nil,
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
{
testName: "Test Do not update if OnePassword Item Version has not changed",
deploymentResource: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: "data we don't expect to have updated",
passKey: "data we don't expect to have updated",
},
},
{
testName: "Test Updating Existing Kubernetes Secret using Deployment",
deploymentResource: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: "456",
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
{
testName: "Create Deployment",
deploymentResource: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.ItemPathAnnotation: itemPath,
op.NameAnnotation: name,
},
},
},
existingSecret: nil,
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
}
func TestReconcileDeployment(t *testing.T) {
for _, testData := range tests {
t.Run(testData.testName, func(t *testing.T) {
// Register operator types with the runtime scheme.
s := scheme.Scheme
s.AddKnownTypes(appsv1.SchemeGroupVersion, testData.deploymentResource)
// Objects to track in the fake client.
objs := []runtime.Object{
testData.deploymentResource,
}
if testData.existingSecret != nil {
objs = append(objs, testData.existingSecret)
}
if testData.existingDeployment != nil {
objs = append(objs, testData.existingDeployment)
}
// Create a fake client to mock API calls.
cl := fake.NewFakeClientWithScheme(s, objs...)
// Create a Deployment object with the scheme and mock kubernetes
// and 1Password Connect client.
opConnectClient := &mocks.TestClient{}
mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
item := onepassword.Item{}
item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
item.Version = version
item.Vault.ID = vaultUUID
item.ID = uuid
return &item, nil
}
r := &ReconcileDeployment{
kubeClient: cl,
scheme: s,
opConnectClient: opConnectClient,
opAnnotationRegExp: regex,
}
// Mock request to simulate Reconcile() being called on an event for a
// watched resource.
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(req)
assert.Equal(t, testData.expectedError, err)
var expectedSecretName string
if testData.expectedResultSecret == nil {
expectedSecretName = testData.deploymentResource.Name
} else {
expectedSecretName = testData.expectedResultSecret.Name
}
// Check if Secret has been created and has the correct data
secret := &corev1.Secret{}
err = cl.Get(context.TODO(), types.NamespacedName{Name: expectedSecretName, Namespace: namespace}, secret)
if testData.expectedResultSecret == nil {
assert.Error(t, err)
assert.True(t, errors2.IsNotFound(err))
} else {
assert.Equal(t, testData.expectedResultSecret.Data, secret.Data)
assert.Equal(t, testData.expectedResultSecret.Name, secret.Name)
assert.Equal(t, testData.expectedResultSecret.Type, secret.Type)
assert.Equal(t, testData.expectedResultSecret.Annotations[op.VersionAnnotation], secret.Annotations[op.VersionAnnotation])
updatedCR := &appsv1.Deployment{}
err = cl.Get(context.TODO(), req.NamespacedName, updatedCR)
assert.NoError(t, err)
}
})
}
}
func generateFields(username, password string) []*onepassword.ItemField {
fields := []*onepassword.ItemField{
{
Label: "username",
Value: username,
},
{
Label: "password",
Value: password,
},
}
return fields
}

View File

@@ -1,155 +0,0 @@
package onepassworditem
import (
"context"
"fmt"
onepasswordv1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
"github.com/1Password/onepassword-operator/pkg/onepassword"
op "github.com/1Password/onepassword-operator/pkg/onepassword"
"github.com/1Password/onepassword-operator/pkg/utils"
"github.com/1Password/connect-sdk-go/connect"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
kubeClient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("controller_onepassworditem")
var finalizer = "onepassword.com/finalizer.secret"
func Add(mgr manager.Manager, opConnectClient connect.Client) error {
return add(mgr, newReconciler(mgr, opConnectClient))
}
func newReconciler(mgr manager.Manager, opConnectClient connect.Client) *ReconcileOnePasswordItem {
return &ReconcileOnePasswordItem{
kubeClient: mgr.GetClient(),
scheme: mgr.GetScheme(),
opConnectClient: opConnectClient,
}
}
func add(mgr manager.Manager, r reconcile.Reconciler) error {
c, err := controller.New("onepassworditem-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to primary resource OnePasswordItem
err = c.Watch(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
return nil
}
var _ reconcile.Reconciler = &ReconcileOnePasswordItem{}
type ReconcileOnePasswordItem struct {
kubeClient kubeClient.Client
scheme *runtime.Scheme
opConnectClient connect.Client
}
func (r *ReconcileOnePasswordItem) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&onepasswordv1.OnePasswordItem{}).
Complete(r)
}
func (r *ReconcileOnePasswordItem) Reconcile(request reconcile.Request) (reconcile.Result, error) {
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
reqLogger.Info("Reconciling OnePasswordItem")
onepassworditem := &onepasswordv1.OnePasswordItem{}
err := r.kubeClient.Get(context.Background(), request.NamespacedName, onepassworditem)
if err != nil {
if errors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// If the OnePasswordItem is not being deleted
if onepassworditem.ObjectMeta.DeletionTimestamp.IsZero() {
// Add a finalizer to the OnePasswordItem if one does not exist.
// This is so we can handle cleanup of the associated secret properly
if !utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
onepassworditem.ObjectMeta.Finalizers = append(onepassworditem.ObjectMeta.Finalizers, finalizer)
if err := r.kubeClient.Update(context.Background(), onepassworditem); err != nil {
return reconcile.Result{}, err
}
}
// Handle creation or update of the associated kubernetes secret if needed
if err := r.HandleOnePasswordItem(onepassworditem, request); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// If the 1Password finalizer exists then we must clean up the associated secret
if utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
// Delete associated kubernetes secret
if err = r.cleanupKubernetesSecret(onepassworditem); err != nil {
return reconcile.Result{}, err
}
// Remove finalizer now that cleanup is complete
if err := r.removeFinalizer(onepassworditem); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
func (r *ReconcileOnePasswordItem) removeFinalizer(onePasswordItem *onepasswordv1.OnePasswordItem) error {
onePasswordItem.ObjectMeta.Finalizers = utils.RemoveString(onePasswordItem.ObjectMeta.Finalizers, finalizer)
if err := r.kubeClient.Update(context.Background(), onePasswordItem); err != nil {
return err
}
return nil
}
func (r *ReconcileOnePasswordItem) cleanupKubernetesSecret(onePasswordItem *onepasswordv1.OnePasswordItem) error {
kubernetesSecret := &corev1.Secret{}
kubernetesSecret.ObjectMeta.Name = onePasswordItem.Name
kubernetesSecret.ObjectMeta.Namespace = onePasswordItem.Namespace
if err := r.kubeClient.Delete(context.Background(), kubernetesSecret); err != nil {
if !errors.IsNotFound(err) {
return err
}
}
return nil
}
func (r *ReconcileOnePasswordItem) removeOnePasswordFinalizerFromOnePasswordItem(opSecret *onepasswordv1.OnePasswordItem) error {
opSecret.ObjectMeta.Finalizers = utils.RemoveString(opSecret.ObjectMeta.Finalizers, finalizer)
return r.kubeClient.Update(context.Background(), opSecret)
}
func (r *ReconcileOnePasswordItem) HandleOnePasswordItem(resource *onepasswordv1.OnePasswordItem, request reconcile.Request) error {
secretName := resource.GetName()
autoRestart := resource.Annotations[op.RestartDeploymentsAnnotation]
item, err := onepassword.GetOnePasswordItemByPath(r.opConnectClient, resource.Spec.ItemPath)
if err != nil {
return fmt.Errorf("Failed to retrieve item: %v", err)
}
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, resource.Namespace, item, autoRestart)
}

View File

@@ -1,308 +0,0 @@
package onepassworditem
import (
"context"
"fmt"
"testing"
"github.com/1Password/onepassword-operator/pkg/mocks"
op "github.com/1Password/onepassword-operator/pkg/onepassword"
onepasswordv1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
"github.com/1Password/connect-sdk-go/onepassword"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
errors2 "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubectl/pkg/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
onePasswordItemKind = "OnePasswordItem"
onePasswordItemAPIVersion = "onepassword.com/v1"
name = "test"
namespace = "default"
vaultId = "hfnjvi6aymbsnfc2xeeoheizda"
itemId = "nwrhuano7bcwddcviubpp4mhfq"
username = "test-user"
password = "QmHumKc$mUeEem7caHtbaBaJ"
userKey = "username"
passKey = "password"
version = 123
)
type testReconcileItem struct {
testName string
customResource *onepasswordv1.OnePasswordItem
existingSecret *corev1.Secret
expectedError error
expectedResultSecret *corev1.Secret
expectedEvents []string
opItem map[string]string
existingOnePasswordItem *onepasswordv1.OnePasswordItem
}
var (
expectedSecretData = map[string][]byte{
"password": []byte(password),
"username": []byte(username),
}
itemPath = fmt.Sprintf("vaults/%v/items/%v", vaultId, itemId)
)
var (
time = metav1.Now()
)
var tests = []testReconcileItem{
{
testName: "Test Delete OnePasswordItem",
customResource: &onepasswordv1.OnePasswordItem{
TypeMeta: metav1.TypeMeta{
Kind: onePasswordItemKind,
APIVersion: onePasswordItemAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
DeletionTimestamp: &time,
Finalizers: []string{
finalizer,
},
},
Spec: onepasswordv1.OnePasswordItemSpec{
ItemPath: itemPath,
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: nil,
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
{
testName: "Test Do not update if OnePassword Version has not changed",
customResource: &onepasswordv1.OnePasswordItem{
TypeMeta: metav1.TypeMeta{
Kind: onePasswordItemKind,
APIVersion: onePasswordItemAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: onepasswordv1.OnePasswordItemSpec{
ItemPath: itemPath,
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: "data we don't expect to have updated",
passKey: "data we don't expect to have updated",
},
},
{
testName: "Test Updating Existing Kubernetes Secret using OnePasswordItem",
customResource: &onepasswordv1.OnePasswordItem{
TypeMeta: metav1.TypeMeta{
Kind: onePasswordItemKind,
APIVersion: onePasswordItemAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: onepasswordv1.OnePasswordItemSpec{
ItemPath: itemPath,
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: "456",
},
},
Data: expectedSecretData,
},
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
{
testName: "Custom secret type",
customResource: &onepasswordv1.OnePasswordItem{
TypeMeta: metav1.TypeMeta{
Kind: onePasswordItemKind,
APIVersion: onePasswordItemAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: onepasswordv1.OnePasswordItemSpec{
ItemPath: itemPath,
},
},
existingSecret: nil,
expectedError: nil,
expectedResultSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
op.VersionAnnotation: fmt.Sprint(version),
},
},
Data: expectedSecretData,
},
opItem: map[string]string{
userKey: username,
passKey: password,
},
},
}
func TestReconcileOnePasswordItem(t *testing.T) {
for _, testData := range tests {
t.Run(testData.testName, func(t *testing.T) {
// Register operator types with the runtime scheme.
s := scheme.Scheme
s.AddKnownTypes(onepasswordv1.SchemeGroupVersion, testData.customResource)
// Objects to track in the fake client.
objs := []runtime.Object{
testData.customResource,
}
if testData.existingSecret != nil {
objs = append(objs, testData.existingSecret)
}
if testData.existingOnePasswordItem != nil {
objs = append(objs, testData.existingOnePasswordItem)
}
// Create a fake client to mock API calls.
cl := fake.NewFakeClientWithScheme(s, objs...)
// Create a OnePasswordItem object with the scheme and mock kubernetes
// and 1Password Connect client.
opConnectClient := &mocks.TestClient{}
mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
item := onepassword.Item{}
item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
item.Version = version
item.Vault.ID = vaultUUID
item.ID = uuid
return &item, nil
}
r := &ReconcileOnePasswordItem{
kubeClient: cl,
scheme: s,
opConnectClient: opConnectClient,
}
// Mock request to simulate Reconcile() being called on an event for a
// watched resource.
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}
_, err := r.Reconcile(req)
assert.Equal(t, testData.expectedError, err)
var expectedSecretName string
if testData.expectedResultSecret == nil {
expectedSecretName = testData.customResource.Name
} else {
expectedSecretName = testData.expectedResultSecret.Name
}
// Check if Secret has been created and has the correct data
secret := &corev1.Secret{}
err = cl.Get(context.TODO(), types.NamespacedName{Name: expectedSecretName, Namespace: namespace}, secret)
if testData.expectedResultSecret == nil {
assert.Error(t, err)
assert.True(t, errors2.IsNotFound(err))
} else {
assert.Equal(t, testData.expectedResultSecret.Data, secret.Data)
assert.Equal(t, testData.expectedResultSecret.Name, secret.Name)
assert.Equal(t, testData.expectedResultSecret.Type, secret.Type)
assert.Equal(t, testData.expectedResultSecret.Annotations[op.VersionAnnotation], secret.Annotations[op.VersionAnnotation])
updatedCR := &onepasswordv1.OnePasswordItem{}
err = cl.Get(context.TODO(), req.NamespacedName, updatedCR)
assert.NoError(t, err)
}
})
}
}
func generateFields(username, password string) []*onepassword.ItemField {
fields := []*onepassword.ItemField{
{
Label: "username",
Value: username,
},
{
Label: "password",
Value: password,
},
}
return fields
}

View File

@@ -4,12 +4,22 @@ import (
"context"
"fmt"
"regexp"
"strings"
"reflect"
errs "errors"
"github.com/1Password/connect-sdk-go/onepassword"
"github.com/1Password/onepassword-operator/pkg/utils"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kubeValidate "k8s.io/apimachinery/pkg/util/validation"
kubernetesClient "sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
@@ -21,24 +31,33 @@ const restartAnnotation = OnepasswordPrefix + "/last-restarted"
const ItemPathAnnotation = OnepasswordPrefix + "/item-path"
const RestartDeploymentsAnnotation = OnepasswordPrefix + "/auto-restart"
var ErrCannotUpdateSecretType = errs.New("Cannot change secret type. Secret type is immutable")
var log = logf.Log
func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string) error {
func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string, labels map[string]string, secretType string, secretAnnotations map[string]string, ownerRef *metav1.OwnerReference) error {
itemVersion := fmt.Sprint(item.Version)
annotations := map[string]string{
VersionAnnotation: itemVersion,
ItemPathAnnotation: fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID),
// If secretAnnotations is nil, create an empty map so the operator's own annotations can be added to it below
if secretAnnotations == nil {
secretAnnotations = map[string]string{}
}
secretAnnotations[VersionAnnotation] = itemVersion
secretAnnotations[ItemPathAnnotation] = fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID)
if autoRestart != "" {
_, err := utils.StringToBool(autoRestart)
if err != nil {
log.Error(err, "Error parsing %v annotation on Secret %v. Must be true or false. Defaulting to false.", RestartDeploymentsAnnotation, secretName)
return err
}
annotations[RestartDeploymentsAnnotation] = autoRestart
secretAnnotations[RestartDeploymentsAnnotation] = autoRestart
}
secret := BuildKubernetesSecretFromOnePasswordItem(secretName, namespace, annotations, *item)
// "Opaque" and "" secret types are treated the same by Kubernetes.
secret := BuildKubernetesSecretFromOnePasswordItem(secretName, namespace, secretAnnotations, labels, secretType, *item, ownerRef)
currentSecret := &corev1.Secret{}
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, currentSecret)
@@ -49,9 +68,17 @@ func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretNa
return err
}
if currentSecret.Annotations[VersionAnnotation] != itemVersion {
currentAnnotations := currentSecret.Annotations
currentLabels := currentSecret.Labels
currentSecretType := string(currentSecret.Type)
if !reflect.DeepEqual(currentSecretType, secretType) {
return ErrCannotUpdateSecretType
}
if !reflect.DeepEqual(currentAnnotations, secretAnnotations) || !reflect.DeepEqual(currentLabels, labels) {
log.Info(fmt.Sprintf("Updating Secret %v at namespace '%v'", secret.Name, secret.Namespace))
currentSecret.ObjectMeta.Annotations = annotations
currentSecret.ObjectMeta.Annotations = secretAnnotations
currentSecret.ObjectMeta.Labels = labels
currentSecret.Data = secret.Data
return kubeClient.Update(context.Background(), currentSecret)
}
@@ -60,23 +87,99 @@ func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretNa
return nil
}
func BuildKubernetesSecretFromOnePasswordItem(name, namespace string, annotations map[string]string, item onepassword.Item) *corev1.Secret {
func BuildKubernetesSecretFromOnePasswordItem(name, namespace string, annotations map[string]string, labels map[string]string, secretType string, item onepassword.Item, ownerRef *metav1.OwnerReference) *corev1.Secret {
var ownerRefs []metav1.OwnerReference
if ownerRef != nil {
ownerRefs = []metav1.OwnerReference{*ownerRef}
}
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: annotations,
Name: formatSecretName(name),
Namespace: namespace,
Annotations: annotations,
Labels: labels,
OwnerReferences: ownerRefs,
},
Data: BuildKubernetesSecretData(item.Fields),
Data: BuildKubernetesSecretData(item.Fields, item.Files),
Type: corev1.SecretType(secretType),
}
}
func BuildKubernetesSecretData(fields []*onepassword.ItemField) map[string][]byte {
func BuildKubernetesSecretData(fields []*onepassword.ItemField, files []*onepassword.File) map[string][]byte {
secretData := map[string][]byte{}
for i := 0; i < len(fields); i++ {
if fields[i].Value != "" {
secretData[fields[i].Label] = []byte(fields[i].Value)
key := formatSecretDataName(fields[i].Label)
secretData[key] = []byte(fields[i].Value)
}
}
// populate unpopulated fields from files
for _, file := range files {
content, err := file.Content()
if err != nil {
log.Error(err, "Could not load contents of file %s", file.Name)
continue
}
if content != nil {
key := file.Name
if secretData[key] == nil {
secretData[key] = content
} else {
log.Info(fmt.Sprintf("File '%s' ignored because of a field with the same name", file.Name))
}
}
}
return secretData
}
// formatSecretName rewrites a value to be a valid Secret name.
//
// The Secret meta.name must be a valid DNS subdomain name
// (https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets)
func formatSecretName(value string) string {
if errs := kubeValidate.IsDNS1123Subdomain(value); len(errs) == 0 {
return value
}
return createValidSecretName(value)
}
// formatSecretDataName rewrites a value to be a valid Secret data key.
//
// The Secret data keys must consist of alphanumeric characters, `-`, `_` or `.`
// (https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets)
func formatSecretDataName(value string) string {
if errs := kubeValidate.IsConfigMapKey(value); len(errs) == 0 {
return value
}
return createValidSecretDataName(value)
}
var invalidDNS1123Chars = regexp.MustCompile("[^a-z0-9-.]+")
func createValidSecretName(value string) string {
result := strings.ToLower(value)
result = invalidDNS1123Chars.ReplaceAllString(result, "-")
if len(result) > kubeValidate.DNS1123SubdomainMaxLength {
result = result[0:kubeValidate.DNS1123SubdomainMaxLength]
}
// first and last character MUST be alphanumeric
return strings.Trim(result, "-.")
}
var invalidDataChars = regexp.MustCompile("[^a-zA-Z0-9-._]+")
var invalidStartEndChars = regexp.MustCompile("(^[^a-zA-Z0-9-._]+|[^a-zA-Z0-9-._]+$)")
func createValidSecretDataName(value string) string {
result := invalidStartEndChars.ReplaceAllString(value, "")
result = invalidDataChars.ReplaceAllString(result, "-")
if len(result) > kubeValidate.DNS1123SubdomainMaxLength {
result = result[0:kubeValidate.DNS1123SubdomainMaxLength]
}
return result
}
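
Since the behaviour of these helpers is easiest to see on a concrete value, here is a small test-style sketch (hypothetical and package-internal; the function name and inputs are made up, with the expected outputs derived from the regular expressions above):

package kubernetessecrets

import "testing"

// TestFormatHelpersSketch illustrates what the formatting helpers above do to
// names that 1Password items commonly have. It is a sketch, not part of the
// operator's own test suite.
func TestFormatHelpersSketch(t *testing.T) {
	// meta.name must be a lowercase DNS-1123 subdomain, so spaces, capitals and
	// symbols are replaced with '-' and the result is trimmed of leading/trailing
	// '-' or '.'.
	if got := formatSecretName("My 1Password Item!"); got != "my-1password-item" {
		t.Errorf("unexpected secret name: %s", got)
	}
	// Data keys only need to be valid ConfigMap-style keys, so case is preserved
	// and only runs of disallowed characters are replaced.
	if got := formatSecretDataName("API Key (prod)"); got != "API-Key-prod" {
		t.Errorf("unexpected data key: %s", got)
	}
}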

View File

@@ -8,7 +8,9 @@ import (
"github.com/1Password/connect-sdk-go/onepassword"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kubeValidate "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -30,7 +32,13 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
kubeClient := fake.NewFakeClient()
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation)
secretLabels := map[string]string{}
secretAnnotations := map[string]string{
"testAnnotation": "exists",
}
secretType := ""
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
@@ -42,6 +50,58 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
}
compareFields(item.Fields, createdSecret.Data, t)
compareAnnotationsToItem(createdSecret.Annotations, item, t)
if createdSecret.Annotations["testAnnotation"] != "exists" {
t.Errorf("Expected testAnnotation to be merged with existing annotations, but wasn't.")
}
}
func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
secretName := "test-secret-name"
namespace := "test"
item := onepassword.Item{}
item.Fields = generateFields(5)
item.Version = 123
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
kubeClient := fake.NewFakeClient()
secretLabels := map[string]string{}
secretAnnotations := map[string]string{
"testAnnotation": "exists",
}
secretType := ""
ownerRef := &metav1.OwnerReference{
Kind: "Deployment",
APIVersion: "apps/v1",
Name: "test-deployment",
UID: types.UID("test-uid"),
}
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, ownerRef)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
createdSecret := &corev1.Secret{}
err = kubeClient.Get(context.Background(), types.NamespacedName{Name: secretName, Namespace: namespace}, createdSecret)
if err != nil {
t.Errorf("Secret was not created: %v", err)
}
// Check owner references.
gotOwnerRefs := createdSecret.ObjectMeta.OwnerReferences
if len(gotOwnerRefs) != 1 {
t.Errorf("Expected owner references length: 1 but got: %d", len(gotOwnerRefs))
}
expOwnerRef := metav1.OwnerReference{
Kind: "Deployment",
APIVersion: "apps/v1",
Name: "test-deployment",
UID: types.UID("test-uid"),
}
gotOwnerRef := gotOwnerRefs[0]
if gotOwnerRef != expOwnerRef {
t.Errorf("Expected owner reference value: %v but got: %v", expOwnerRef, gotOwnerRef)
}
}
func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
@@ -55,7 +115,12 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
kubeClient := fake.NewFakeClient()
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation)
secretLabels := map[string]string{}
secretAnnotations := map[string]string{}
secretType := ""
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
@@ -66,7 +131,7 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
newItem.Version = 456
newItem.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
newItem.ID = "h46bb3jddvay7nxopfhvlwg35q"
err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation)
err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
@@ -82,7 +147,7 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
func TestBuildKubernetesSecretData(t *testing.T) {
fields := generateFields(5)
secretData := BuildKubernetesSecretData(fields)
secretData := BuildKubernetesSecretData(fields, nil)
if len(secretData) != len(fields) {
t.Errorf("Unexpected number of secret fields returned. Expected 3, got %v", len(secretData))
}
@@ -99,9 +164,11 @@ func TestBuildKubernetesSecretFromOnePasswordItem(t *testing.T) {
}
item := onepassword.Item{}
item.Fields = generateFields(5)
labels := map[string]string{}
secretType := ""
kubeSecret := BuildKubernetesSecretFromOnePasswordItem(name, namespace, annotations, item)
if kubeSecret.Name != name {
kubeSecret := BuildKubernetesSecretFromOnePasswordItem(name, namespace, annotations, labels, secretType, item, nil)
if kubeSecret.Name != strings.ToLower(name) {
t.Errorf("Expected name value: %v but got: %v", name, kubeSecret.Name)
}
if kubeSecret.Namespace != namespace {
@@ -113,6 +180,79 @@ func TestBuildKubernetesSecretFromOnePasswordItem(t *testing.T) {
compareFields(item.Fields, kubeSecret.Data, t)
}
func TestBuildKubernetesSecretFixesInvalidLabels(t *testing.T) {
name := "inV@l1d k8s secret%name"
expectedName := "inv-l1d-k8s-secret-name"
namespace := "someNamespace"
annotations := map[string]string{
"annotationKey": "annotationValue",
}
labels := map[string]string{}
item := onepassword.Item{}
secretType := ""
item.Fields = []*onepassword.ItemField{
{
Label: "label w%th invalid ch!rs-",
Value: "value1",
},
{
Label: strings.Repeat("x", kubeValidate.DNS1123SubdomainMaxLength+1),
Value: "name exceeds max length",
},
}
kubeSecret := BuildKubernetesSecretFromOnePasswordItem(name, namespace, annotations, labels, secretType, item, nil)
// Assert Secret's meta.name was fixed
if kubeSecret.Name != expectedName {
t.Errorf("Expected name value: %v but got: %v", name, kubeSecret.Name)
}
if kubeSecret.Namespace != namespace {
t.Errorf("Expected namespace value: %v but got: %v", namespace, kubeSecret.Namespace)
}
// assert labels were fixed for each data key
for key := range kubeSecret.Data {
if !validLabel(key) {
t.Errorf("Expected valid kubernetes label, got %s", key)
}
}
}
func TestCreateKubernetesTLSSecretFromOnePasswordItem(t *testing.T) {
secretName := "tls-test-secret-name"
namespace := "test"
item := onepassword.Item{}
item.Fields = generateFields(5)
item.Version = 123
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
kubeClient := fake.NewFakeClient()
secretLabels := map[string]string{}
secretAnnotations := map[string]string{
"testAnnotation": "exists",
}
secretType := "kubernetes.io/tls"
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
createdSecret := &corev1.Secret{}
err = kubeClient.Get(context.Background(), types.NamespacedName{Name: secretName, Namespace: namespace}, createdSecret)
if err != nil {
t.Errorf("Secret was not created: %v", err)
}
if createdSecret.Type != corev1.SecretTypeTLS {
t.Errorf("Expected secretType to be of tyype corev1.SecretTypeTLS, got %s", string(createdSecret.Type))
}
}
func compareAnnotationsToItem(annotations map[string]string, item onepassword.Item, t *testing.T) {
actualVaultId, actualItemId, err := ParseVaultIdAndItemIdFromPath(annotations[ItemPathAnnotation])
if err != nil {
@@ -164,3 +304,10 @@ func ParseVaultIdAndItemIdFromPath(path string) (string, string, error) {
}
return "", "", fmt.Errorf("%q is not an acceptable path for One Password item. Must be of the format: `vaults/{vault_id}/items/{item_id}`", path)
}
func validLabel(v string) bool {
if err := kubeValidate.IsConfigMapKey(v); len(err) > 0 {
return false
}
return true
}

View File

@@ -7,6 +7,7 @@ import (
type TestClient struct {
GetVaultsFunc func() ([]onepassword.Vault, error)
GetVaultsByTitleFunc func(title string) ([]onepassword.Vault, error)
GetVaultFunc func(uuid string) (*onepassword.Vault, error)
GetItemFunc func(uuid string, vaultUUID string) (*onepassword.Item, error)
GetItemsFunc func(vaultUUID string) ([]onepassword.Item, error)
GetItemsByTitleFunc func(title string, vaultUUID string) ([]onepassword.Item, error)
@@ -14,11 +15,14 @@ type TestClient struct {
CreateItemFunc func(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
UpdateItemFunc func(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
DeleteItemFunc func(item *onepassword.Item, vaultUUID string) error
GetFileFunc func(uuid string, itemUUID string, vaultUUID string) (*onepassword.File, error)
GetFileContentFunc func(file *onepassword.File) ([]byte, error)
}
var (
GetGetVaultsFunc func() ([]onepassword.Vault, error)
DoGetVaultsByTitleFunc func(title string) ([]onepassword.Vault, error)
DoGetVaultFunc func(uuid string) (*onepassword.Vault, error)
GetGetItemFunc func(uuid string, vaultUUID string) (*onepassword.Item, error)
DoGetItemsByTitleFunc func(title string, vaultUUID string) ([]onepassword.Item, error)
DoGetItemByTitleFunc func(title string, vaultUUID string) (*onepassword.Item, error)
@@ -26,6 +30,8 @@ var (
DoDeleteItemFunc func(item *onepassword.Item, vaultUUID string) error
DoGetItemsFunc func(vaultUUID string) ([]onepassword.Item, error)
DoUpdateItemFunc func(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
DoGetFileFunc func(uuid string, itemUUID string, vaultUUID string) (*onepassword.File, error)
DoGetFileContentFunc func(file *onepassword.File) ([]byte, error)
)
// Do is the mock client's `Do` func
@@ -37,6 +43,10 @@ func (m *TestClient) GetVaultsByTitle(title string) ([]onepassword.Vault, error)
return DoGetVaultsByTitleFunc(title)
}
func (m *TestClient) GetVault(uuid string) (*onepassword.Vault, error) {
return DoGetVaultFunc(uuid)
}
func (m *TestClient) GetItem(uuid string, vaultUUID string) (*onepassword.Item, error) {
return GetGetItemFunc(uuid, vaultUUID)
}
@@ -64,3 +74,11 @@ func (m *TestClient) DeleteItem(item *onepassword.Item, vaultUUID string) error
func (m *TestClient) UpdateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
return DoUpdateItemFunc(item, vaultUUID)
}
func (m *TestClient) GetFile(uuid string, itemUUID string, vaultUUID string) (*onepassword.File, error) {
return DoGetFileFunc(uuid, itemUUID, vaultUUID)
}
func (m *TestClient) GetFileContent(file *onepassword.File) ([]byte, error) {
return DoGetFileContentFunc(file)
}
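
For completeness, here is a sketch of how a test could stub the new file-related hooks; the package name, test name, and item/file values are illustrative and not taken from the repository's tests:

package mocks_test

import (
	"testing"

	"github.com/1Password/connect-sdk-go/onepassword"

	"github.com/1Password/onepassword-operator/pkg/mocks"
)

// TestFileMocksSketch wires the GetItem and GetFileContent hooks so a test can
// exercise the operator's file handling without a real Connect server.
func TestFileMocksSketch(t *testing.T) {
	client := &mocks.TestClient{}

	mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
		item := &onepassword.Item{ID: uuid}
		item.Vault.ID = vaultUUID
		item.Files = []*onepassword.File{{ID: "file-id", Name: "config.yaml"}}
		return item, nil
	}
	mocks.DoGetFileContentFunc = func(file *onepassword.File) ([]byte, error) {
		return []byte("file contents"), nil
	}

	item, err := client.GetItem("item-uuid", "vault-uuid")
	if err != nil || len(item.Files) != 1 {
		t.Fatalf("unexpected item: %v, err: %v", item, err)
	}
	content, err := client.GetFileContent(item.Files[0])
	if err != nil || string(content) != "file contents" {
		t.Fatalf("unexpected file content: %q, err: %v", content, err)
	}
}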

View File

@@ -2,6 +2,7 @@ package onepassword
import (
"context"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"os"
appsv1 "k8s.io/api/apps/v1"
@@ -17,13 +18,13 @@ var logConnectSetup = logf.Log.WithName("ConnectSetup")
var deploymentPath = "deploy/connect/deployment.yaml"
var servicePath = "deploy/connect/service.yaml"
func SetupConnect(kubeClient client.Client) error {
err := setupService(kubeClient, servicePath)
func SetupConnect(kubeClient client.Client, deploymentNamespace string) error {
err := setupService(kubeClient, servicePath, deploymentNamespace)
if err != nil {
return err
}
err = setupDeployment(kubeClient, deploymentPath)
err = setupDeployment(kubeClient, deploymentPath, deploymentNamespace)
if err != nil {
return err
}
@@ -31,22 +32,22 @@ func SetupConnect(kubeClient client.Client) error {
return nil
}
func setupDeployment(kubeClient client.Client, deploymentPath string) error {
func setupDeployment(kubeClient client.Client, deploymentPath string, deploymentNamespace string) error {
existingDeployment := &appsv1.Deployment{}
// check if deployment has already been created
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: "default"}, existingDeployment)
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: deploymentNamespace}, existingDeployment)
if err != nil {
if errors.IsNotFound(err) {
logConnectSetup.Info("No existing Connect deployment found. Creating Deployment")
return createDeployment(kubeClient, deploymentPath)
return createDeployment(kubeClient, deploymentPath, deploymentNamespace)
}
}
return err
}
func createDeployment(kubeClient client.Client, deploymentPath string) error {
deployment, err := getDeploymentToCreate(deploymentPath)
func createDeployment(kubeClient client.Client, deploymentPath string, deploymentNamespace string) error {
deployment, err := getDeploymentToCreate(deploymentPath, deploymentNamespace)
if err != nil {
return err
}
@@ -59,12 +60,16 @@ func createDeployment(kubeClient client.Client, deploymentPath string) error {
return nil
}
func getDeploymentToCreate(deploymentPath string) (*appsv1.Deployment, error) {
func getDeploymentToCreate(deploymentPath string, deploymentNamespace string) (*appsv1.Deployment, error) {
f, err := os.Open(deploymentPath)
if err != nil {
return nil, err
}
deployment := &appsv1.Deployment{}
deployment := &appsv1.Deployment{
ObjectMeta: v1.ObjectMeta{
Namespace: deploymentNamespace,
},
}
err = yaml.NewYAMLOrJSONDecoder(f, 4096).Decode(deployment)
if err != nil {
@@ -73,26 +78,30 @@ func getDeploymentToCreate(deploymentPath string) (*appsv1.Deployment, error) {
return deployment, nil
}
func setupService(kubeClient client.Client, servicePath string) error {
func setupService(kubeClient client.Client, servicePath string, deploymentNamespace string) error {
existingService := &corev1.Service{}
// check if service has already been created
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: "default"}, existingService)
err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "onepassword-connect", Namespace: deploymentNamespace}, existingService)
if err != nil {
if errors.IsNotFound(err) {
logConnectSetup.Info("No existing Connect service found. Creating Service")
return createService(kubeClient, servicePath)
return createService(kubeClient, servicePath, deploymentNamespace)
}
}
return err
}
func createService(kubeClient client.Client, servicePath string) error {
func createService(kubeClient client.Client, servicePath string, deploymentNamespace string) error {
f, err := os.Open(servicePath)
if err != nil {
return err
}
service := &corev1.Service{}
service := &corev1.Service{
ObjectMeta: v1.ObjectMeta{
Namespace: deploymentNamespace,
},
}
err = yaml.NewYAMLOrJSONDecoder(f, 4096).Decode(service)
if err != nil {

View File

@@ -25,7 +25,7 @@ func TestServiceSetup(t *testing.T) {
// Create a fake client to mock API calls.
client := fake.NewFakeClientWithScheme(s, objs...)
err := setupService(client, "../../deploy/connect/service.yaml")
err := setupService(client, "../../deploy/connect/service.yaml", defaultNamespacedName.Namespace)
if err != nil {
t.Errorf("Error Setting Up Connect: %v", err)
@@ -50,7 +50,7 @@ func TestDeploymentSetup(t *testing.T) {
// Create a fake client to mock API calls.
client := fake.NewFakeClientWithScheme(s, objs...)
err := setupDeployment(client, "../../deploy/connect/deployment.yaml")
err := setupDeployment(client, "../../deploy/connect/deployment.yaml", defaultNamespacedName.Namespace)
if err != nil {
t.Errorf("Error Setting Up Connect: %v", err)

View File

@@ -1,6 +1,8 @@
package onepassword
import corev1 "k8s.io/api/core/v1"
import (
corev1 "k8s.io/api/core/v1"
)
func AreContainersUsingSecrets(containers []corev1.Container, secrets map[string]*corev1.Secret) bool {
for i := 0; i < len(containers); i++ {
@@ -13,6 +15,15 @@ func AreContainersUsingSecrets(containers []corev1.Container, secrets map[string
}
}
}
envFromVariables := containers[i].EnvFrom
for j := 0; j < len(envFromVariables); j++ {
if envFromVariables[j].SecretRef != nil {
_, ok := secrets[envFromVariables[j].SecretRef.Name]
if ok {
return true
}
}
}
}
return false
}
@@ -28,6 +39,15 @@ func AppendUpdatedContainerSecrets(containers []corev1.Container, secrets map[st
}
}
}
envFromVariables := containers[i].EnvFrom
for j := 0; j < len(envFromVariables); j++ {
if envFromVariables[j].SecretRef != nil {
secret, ok := secrets[envFromVariables[j].SecretRef.LocalObjectReference.Name]
if ok {
updatedDeploymentSecrets[secret.Name] = secret
}
}
}
}
return updatedDeploymentSecrets
}

View File

@@ -4,9 +4,10 @@ import (
"testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestAreContainersUsingSecrets(t *testing.T) {
func TestAreContainersUsingSecretsFromEnv(t *testing.T) {
secretNamesToSearch := map[string]*corev1.Secret{
"onepassword-database-secret": &corev1.Secret{},
"onepassword-api-key": &corev1.Secret{},
@@ -18,7 +19,26 @@ func TestAreContainersUsingSecrets(t *testing.T) {
"some_other_key",
}
containers := generateContainers(containerSecretNames)
containers := generateContainersWithSecretRefsFromEnv(containerSecretNames)
if !AreContainersUsingSecrets(containers, secretNamesToSearch) {
t.Errorf("Expected that containers were using secrets but they were not detected.")
}
}
func TestAreContainersUsingSecretsFromEnvFrom(t *testing.T) {
secretNamesToSearch := map[string]*corev1.Secret{
"onepassword-database-secret": {},
"onepassword-api-key": {},
}
containerSecretNames := []string{
"onepassword-database-secret",
"onepassword-api-key",
"some_other_key",
}
containers := generateContainersWithSecretRefsFromEnvFrom(containerSecretNames)
if !AreContainersUsingSecrets(containers, secretNamesToSearch) {
t.Errorf("Expected that containers were using secrets but they were not detected.")
@@ -27,17 +47,39 @@ func TestAreContainersUsingSecrets(t *testing.T) {
func TestAreContainersNotUsingSecrets(t *testing.T) {
secretNamesToSearch := map[string]*corev1.Secret{
"onepassword-database-secret": &corev1.Secret{},
"onepassword-api-key": &corev1.Secret{},
"onepassword-database-secret": {},
"onepassword-api-key": {},
}
containerSecretNames := []string{
"some_other_key",
}
containers := generateContainers(containerSecretNames)
containers := generateContainersWithSecretRefsFromEnv(containerSecretNames)
if AreContainersUsingSecrets(containers, secretNamesToSearch) {
t.Errorf("Expected that containers were not using secrets but they were detected.")
}
}
func TestAppendUpdatedContainerSecretsParsesEnvFromEnv(t *testing.T) {
secretNamesToSearch := map[string]*corev1.Secret{
"onepassword-database-secret": {},
"onepassword-api-key": {ObjectMeta: metav1.ObjectMeta{Name: "onepassword-api-key"}},
}
containerSecretNames := []string{
"onepassword-api-key",
}
containers := generateContainersWithSecretRefsFromEnvFrom(containerSecretNames)
updatedDeploymentSecrets := map[string]*corev1.Secret{}
updatedDeploymentSecrets = AppendUpdatedContainerSecrets(containers, secretNamesToSearch, updatedDeploymentSecrets)
secretKeyName := "onepassword-api-key"
if updatedDeploymentSecrets[secretKeyName] != secretNamesToSearch[secretKeyName] {
t.Errorf("Expected that the updated Secret referenced via envFrom is found.")
}
}

View File

@@ -39,7 +39,7 @@ func TestIsDeploymentUsingSecretsUsingContainers(t *testing.T) {
}
deployment := &appsv1.Deployment{}
deployment.Spec.Template.Spec.Containers = generateContainers(containerSecretNames)
deployment.Spec.Template.Spec.Containers = generateContainersWithSecretRefsFromEnv(containerSecretNames)
if !IsDeploymentUsingSecrets(deployment, secretNamesToSearch) {
t.Errorf("Expected that deployment was using secrets but they were not detected.")
}

View File

@@ -30,6 +30,14 @@ func GetOnePasswordItemByPath(opConnectClient connect.Client, path string) (*one
if err != nil {
return nil, err
}
for _, file := range item.Files {
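// Pre-fetch each file's content up front; the Connect client stores the downloaded
// bytes on the File, so later file.Content() calls (for example when building the
// Secret data) can return them without another request.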
_, err := opConnectClient.GetFileContent(file)
if err != nil {
return nil, err
}
}
return item, nil
}

View File

@@ -17,8 +17,7 @@ func generateVolumes(names []string) []corev1.Volume {
}
return volumes
}
func generateContainers(names []string) []corev1.Container {
func generateContainersWithSecretRefsFromEnv(names []string) []corev1.Container {
containers := []corev1.Container{}
for i := 0; i < len(names); i++ {
container := corev1.Container{
@@ -40,3 +39,16 @@ func generateContainers(names []string) []corev1.Container {
}
return containers
}
func generateContainersWithSecretRefsFromEnvFrom(names []string) []corev1.Container {
containers := []corev1.Container{}
for i := 0; i < len(names); i++ {
container := corev1.Container{
EnvFrom: []corev1.EnvFromSource{
{SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: names[i]}}},
},
}
containers = append(containers, container)
}
return containers
}

View File

@@ -5,6 +5,8 @@ import (
"fmt"
"time"
v1 "github.com/1Password/onepassword-operator/api/v1"
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
"github.com/1Password/onepassword-operator/pkg/utils"
@@ -116,22 +118,30 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
continue
}
item, err := GetOnePasswordItemByPath(h.opConnectClient, secret.Annotations[ItemPathAnnotation])
onePasswordItemPath := h.getPathFromOnePasswordItem(secret)
item, err := GetOnePasswordItemByPath(h.opConnectClient, onePasswordItemPath)
if err != nil {
return nil, fmt.Errorf("Failed to retrieve item: %v", err)
log.Error(err, "failed to retrieve 1Password item at path \"%s\" for secret \"%s\"", secret.Annotations[ItemPathAnnotation], secret.Name)
continue
}
itemVersion := fmt.Sprint(item.Version)
if currentVersion != itemVersion {
itemPathString := fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID)
if currentVersion != itemVersion || secret.Annotations[ItemPathAnnotation] != itemPathString {
if isItemLockedForForcedRestarts(item) {
log.Info(fmt.Sprintf("Secret '%v' has been updated in 1Password but is set to be ignored. Updates to an ignored secret will not trigger an update to a kubernetes secret or a rolling restart.", secret.GetName()))
secret.Annotations[VersionAnnotation] = itemVersion
secret.Annotations[ItemPathAnnotation] = itemPathString
h.client.Update(context.Background(), &secret)
continue
}
log.Info(fmt.Sprintf("Updating kubernetes secret '%v'", secret.GetName()))
secret.Annotations[VersionAnnotation] = itemVersion
updatedSecret := kubeSecrets.BuildKubernetesSecretFromOnePasswordItem(secret.Name, secret.Namespace, secret.Annotations, *item)
secret.Annotations[ItemPathAnnotation] = itemPathString
updatedSecret := kubeSecrets.BuildKubernetesSecretFromOnePasswordItem(secret.Name, secret.Namespace, secret.Annotations, secret.Labels, string(secret.Type), *item, nil)
log.Info(fmt.Sprintf("New secret path: %v and version: %v", updatedSecret.Annotations[ItemPathAnnotation], updatedSecret.Annotations[VersionAnnotation]))
h.client.Update(context.Background(), updatedSecret)
if updatedSecrets[secret.Namespace] == nil {
updatedSecrets[secret.Namespace] = make(map[string]*corev1.Secret)
@@ -176,6 +186,22 @@ func (h *SecretUpdateHandler) getIsSetForAutoRestartByNamespaceMap() (map[string
return namespacesMap, nil
}
func (h *SecretUpdateHandler) getPathFromOnePasswordItem(secret corev1.Secret) string {
onePasswordItem := &v1.OnePasswordItem{}
// Search for our original OnePasswordItem if it exists
err := h.client.Get(context.TODO(), client.ObjectKey{
Namespace: secret.Namespace,
Name: secret.Name}, onePasswordItem)
if err == nil {
return onePasswordItem.Spec.ItemPath
}
// If we can't find the OnePasswordItem, fall back to the item path annotation on the secret.
return secret.Annotations[ItemPathAnnotation]
}
func isSecretSetForAutoRestart(secret *corev1.Secret, deployment *appsv1.Deployment, setForAutoRestartByNamespace map[string]bool) bool {
restartDeployment := secret.Annotations[RestartDeploymentsAnnotation]
// If the auto-restart annotation is not set on the deployment, check for the annotation on its namespace

pkg/utils/k8sutil.go (45 lines) Normal file
View File

@@ -0,0 +1,45 @@
package utils
import (
"fmt"
"io/ioutil"
"os"
"strings"
)
var ForceRunModeEnv = "OSDK_FORCE_RUN_MODE"
type RunModeType string
const (
LocalRunMode RunModeType = "local"
ClusterRunMode RunModeType = "cluster"
)
// ErrNoNamespace indicates that a namespace could not be found for the current
// environment
var ErrNoNamespace = fmt.Errorf("namespace not found for current environment")
// ErrRunLocal indicates that the operator is set to run in local mode (this error
// is returned by functions that only work on operators running in cluster mode)
var ErrRunLocal = fmt.Errorf("operator run mode forced to local")
// GetOperatorNamespace returns the namespace the operator should be running in.
func GetOperatorNamespace() (string, error) {
if isRunModeLocal() {
return "", ErrRunLocal
}
nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
if err != nil {
if os.IsNotExist(err) {
return "", ErrNoNamespace
}
return "", err
}
ns := strings.TrimSpace(string(nsBytes))
return ns, nil
}
func isRunModeLocal() bool {
return os.Getenv(ForceRunModeEnv) == string(LocalRunMode)
}
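
To show how these helpers are meant to be combined with the namespace-aware Connect setup above, here is a sketch; the deployConnect helper, its package name, and the "default" fallback for local runs are assumptions for illustration only:

package setup

import (
	"github.com/1Password/onepassword-operator/pkg/onepassword"
	"github.com/1Password/onepassword-operator/pkg/utils"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deployConnect resolves the namespace the operator is running in and deploys
// the Connect Service and Deployment there. When the operator is forced into
// local run mode or no namespace file is present, it falls back to "default"
// (a choice made for this sketch, not behaviour taken from the operator).
func deployConnect(kubeClient client.Client) error {
	namespace, err := utils.GetOperatorNamespace()
	if err != nil {
		if err == utils.ErrRunLocal || err == utils.ErrNoNamespace {
			namespace = "default"
		} else {
			return err
		}
	}
	// SetupConnect creates the onepassword-connect Service and Deployment in
	// the resolved namespace if they do not already exist.
	return onepassword.SetupConnect(kubeClient, namespace)
}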

View File

@@ -1,5 +0,0 @@
// +build tools
// Place any runtime dependencies as imports in this file.
// Go modules will be forced to download and install them.
package tools

vendor/cloud.google.com/go/LICENSE (202 lines) generated vendored
View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,12 +0,0 @@
{
"name": "metadata",
"name_pretty": "Google Compute Engine Metadata API",
"product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
"client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
"release_level": "ga",
"language": "go",
"repo": "googleapis/google-cloud-go",
"distribution_name": "cloud.google.com/go/compute/metadata",
"api_id": "compute:metadata",
"requires_billing": false
}

View File

@@ -1,526 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
func (c *cachedValue) get(cl *Client) (v string, err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = cl.getTrimmed(c.k)
} else {
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/googleapis/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Email calls Client.Email on the default client.
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", u, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
if res.StatusCode != http.StatusOK {
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Email(serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
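// exampleSubscribe is an illustrative sketch (not part of the upstream file): it shows how
// Subscribe keeps delivering values until fn returns a non-nil error or the value is deleted.
// The attribute suffix "instance/attributes/my-config" is a placeholder.
func exampleSubscribe(c *Client) error {
return c.Subscribe("instance/attributes/my-config", func(v string, ok bool) error {
if !ok {
// The value was deleted; Subscribe returns after this call.
return nil
}
fmt.Println("new value:", v)
return nil
})
}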
// Error contains an error response from the server.
type Error struct {
// Code is the HTTP response status code.
Code int
// Message is the server response message.
Message string
}
func (e *Error) Error() string {
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
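// exampleUsage is an illustrative sketch (not part of the upstream file): it shows how a caller
// typically combines OnGCE with the package-level helpers above. The attribute key "my-config"
// and the fallback value are placeholders.
func exampleUsage() {
if !OnGCE() {
fmt.Println("not running on GCE; metadata lookups would fail")
return
}
projectID, err := ProjectID()
if err != nil {
fmt.Println("error:", err)
return
}
zone, _ := Zone()
val, err := InstanceAttributeValue("my-config")
if err != nil {
if _, notDefined := err.(NotDefinedError); notDefined {
// The attribute simply is not set; fall back to a default.
val = "default"
} else {
fmt.Println("error:", err)
return
}
}
fmt.Println(projectID, zone, val)
}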

View File

@@ -1,396 +0,0 @@
package connect
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"github.com/1Password/connect-sdk-go/onepassword"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
jaegerClientConfig "github.com/uber/jaeger-client-go/config"
"github.com/uber/jaeger-client-go/zipkin"
)
const (
defaultUserAgent = "connect-sdk-go/%s"
)
// Client Represents an available 1Password Connect API to connect to
type Client interface {
GetVaults() ([]onepassword.Vault, error)
GetVaultsByTitle(title string) ([]onepassword.Vault, error)
GetItem(uuid string, vaultUUID string) (*onepassword.Item, error)
GetItems(vaultUUID string) ([]onepassword.Item, error)
GetItemsByTitle(title string, vaultUUID string) ([]onepassword.Item, error)
GetItemByTitle(title string, vaultUUID string) (*onepassword.Item, error)
CreateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
UpdateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
DeleteItem(item *onepassword.Item, vaultUUID string) error
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
const (
envHostVariable = "OP_CONNECT_HOST"
envTokenVariable = "OP_CONNECT_TOKEN"
)
// NewClientFromEnvironment Returns a Secret Service client assuming that your
// Connect host is set in the OP_CONNECT_HOST environment variable and your jwt in OP_CONNECT_TOKEN
func NewClientFromEnvironment() (Client, error) {
host, found := os.LookupEnv(envHostVariable)
if !found {
return nil, fmt.Errorf("There is no hostname available in the %q variable", envHostVariable)
}
token, found := os.LookupEnv(envTokenVariable)
if !found {
return nil, fmt.Errorf("There is no token available in the %q variable", envTokenVariable)
}
return NewClient(host, token), nil
}
// NewClient Returns a Secret Service client for a given url and jwt
func NewClient(url string, token string) Client {
return NewClientWithUserAgent(url, token, fmt.Sprintf(defaultUserAgent, SDKVersion))
}
// NewClientWithUserAgent Returns a Secret Service client for a given url and jwt and identifies with userAgent
func NewClientWithUserAgent(url string, token string, userAgent string) Client {
if !opentracing.IsGlobalTracerRegistered() {
cfg := jaegerClientConfig.Configuration{}
zipkinPropagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
cfg.InitGlobalTracer(
userAgent,
jaegerClientConfig.Injector(opentracing.HTTPHeaders, zipkinPropagator),
jaegerClientConfig.Extractor(opentracing.HTTPHeaders, zipkinPropagator),
jaegerClientConfig.ZipkinSharedRPCSpan(true),
)
}
return &restClient{
URL: url,
Token: token,
userAgent: userAgent,
tracer: opentracing.GlobalTracer(),
client: http.DefaultClient,
}
}
type restClient struct {
URL string
Token string
userAgent string
tracer opentracing.Tracer
client httpClient
}
// GetVaults Get a list of all available vaults
func (rs *restClient) GetVaults() ([]onepassword.Vault, error) {
span := rs.tracer.StartSpan("GetVaults")
defer span.Finish()
vaultURL := "/v1/vaults"
request, err := rs.buildRequest(http.MethodGet, vaultURL, http.NoBody, span)
if err != nil {
return nil, err
}
response, err := rs.client.Do(request)
if err != nil {
return nil, err
}
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unable to retrieve vaults. Receieved %q for %q", response.Status, vaultURL)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
vaults := []onepassword.Vault{}
if err := json.Unmarshal(body, &vaults); err != nil {
return nil, err
}
return vaults, nil
}
func (rs *restClient) GetVaultsByTitle(title string) ([]onepassword.Vault, error) {
span := rs.tracer.StartSpan("GetVaultsByTitle")
defer span.Finish()
filter := url.QueryEscape(fmt.Sprintf("title eq \"%s\"", title))
itemURL := fmt.Sprintf("/v1/vaults?filter=%s", filter)
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
if err != nil {
return nil, err
}
response, err := rs.client.Do(request)
if err != nil {
return nil, err
}
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unable to retrieve vaults. Receieved %q for %q", response.Status, itemURL)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
vaults := []onepassword.Vault{}
if err := json.Unmarshal(body, &vaults); err != nil {
return nil, err
}
return vaults, nil
}
// GetItem Get a specific Item from the 1Password Connect API
func (rs *restClient) GetItem(uuid string, vaultUUID string) (*onepassword.Item, error) {
span := rs.tracer.StartSpan("GetItem")
defer span.Finish()
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", vaultUUID, uuid)
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
if err != nil {
return nil, err
}
response, err := rs.client.Do(request)
if err != nil {
return nil, err
}
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unable to retrieve item. Receieved %q for %q", response.Status, itemURL)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
item := onepassword.Item{}
if err := json.Unmarshal(body, &item); err != nil {
return nil, err
}
return &item, nil
}
func (rs *restClient) GetItemByTitle(title string, vaultUUID string) (*onepassword.Item, error) {
span := rs.tracer.StartSpan("GetItemByTitle")
defer span.Finish()
items, err := rs.GetItemsByTitle(title, vaultUUID)
if err != nil {
return nil, err
}
if len(items) != 1 {
return nil, fmt.Errorf("Found %d item(s) in vault %q with title %q", len(items), vaultUUID, title)
}
return rs.GetItem(items[0].ID, items[0].Vault.ID)
}
func (rs *restClient) GetItemsByTitle(title string, vaultUUID string) ([]onepassword.Item, error) {
span := rs.tracer.StartSpan("GetItemsByTitle")
defer span.Finish()
filter := url.QueryEscape(fmt.Sprintf("title eq \"%s\"", title))
itemURL := fmt.Sprintf("/v1/vaults/%s/items?filter=%s", vaultUUID, filter)
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
if err != nil {
return nil, err
}
response, err := rs.client.Do(request)
if err != nil {
return nil, err
}
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unable to retrieve item. Receieved %q for %q", response.Status, itemURL)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
items := []onepassword.Item{}
if err := json.Unmarshal(body, &items); err != nil {
return nil, err
}
return items, nil
}
func (rs *restClient) GetItems(vaultUUID string) ([]onepassword.Item, error) {
span := rs.tracer.StartSpan("GetItems")
defer span.Finish()
itemURL := fmt.Sprintf("/v1/vaults/%s/items", vaultUUID)
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
if err != nil {
return nil, err
}
response, err := rs.client.Do(request)
if err != nil {
return nil, err
}
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unable to retrieve items. Receieved %q for %q", response.Status, itemURL)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
items := []onepassword.Item{}
if err := json.Unmarshal(body, &items); err != nil {
return nil, err
}
return items, nil
}
// CreateItem Create a new item in a specified vault
func (rs *restClient) CreateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
span := rs.tracer.StartSpan("CreateItem")
defer span.Finish()
itemURL := fmt.Sprintf("/v1/vaults/%s/items", vaultUUID)
itemBody, err := json.Marshal(item)
if err != nil {
return nil, err
}
request, err := rs.buildRequest(http.MethodPost, itemURL, bytes.NewBuffer(itemBody), span)
if err != nil {
return nil, err
}
response, err := rs.client.Do(request)
if err != nil {
return nil, err
}
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unable to create item. Receieved %q for %q", response.Status, itemURL)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
newItem := onepassword.Item{}
if err := json.Unmarshal(body, &newItem); err != nil {
return nil, err
}
return &newItem, nil
}
// UpdateItem Update an existing item in a specified vault
func (rs *restClient) UpdateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
span := rs.tracer.StartSpan("UpdateItem")
defer span.Finish()
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", item.Vault.ID, item.ID)
itemBody, err := json.Marshal(item)
if err != nil {
return nil, err
}
request, err := rs.buildRequest(http.MethodPut, itemURL, bytes.NewBuffer(itemBody), span)
if err != nil {
return nil, err
}
response, err := rs.client.Do(request)
if err != nil {
return nil, err
}
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unable to update item. Receieved %q for %q", response.Status, itemURL)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
newItem := onepassword.Item{}
if err := json.Unmarshal(body, &newItem); err != nil {
return nil, err
}
return &newItem, nil
}
// DeleteItem Delete an item in a specified vault
func (rs *restClient) DeleteItem(item *onepassword.Item, vaultUUID string) error {
span := rs.tracer.StartSpan("DeleteItem")
defer span.Finish()
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", item.Vault.ID, item.ID)
request, err := rs.buildRequest(http.MethodDelete, itemURL, http.NoBody, span)
if err != nil {
return err
}
response, err := rs.client.Do(request)
if err != nil {
return err
}
if response.StatusCode != http.StatusNoContent {
return fmt.Errorf("Unable to retrieve item. Receieved %q for %q", response.Status, itemURL)
}
return nil
}
func (rs *restClient) buildRequest(method string, path string, body io.Reader, span opentracing.Span) (*http.Request, error) {
url := fmt.Sprintf("%s%s", rs.URL, path)
request, err := http.NewRequest(method, url, body)
if err != nil {
return nil, err
}
request.Header.Set("Content-Type", "application/json")
request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rs.Token))
request.Header.Set("User-Agent", rs.userAgent)
ext.SpanKindRPCClient.Set(span)
ext.HTTPUrl.Set(span, path)
ext.HTTPMethod.Set(span, method)
rs.tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
return request, nil
}
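// exampleUsage is an illustrative sketch (not part of the upstream file): it builds a Client from
// the OP_CONNECT_HOST and OP_CONNECT_TOKEN environment variables and reads a single field.
// The item title "my-item", vault UUID "vault-uuid", and field label "password" are placeholders.
func exampleUsage() error {
client, err := NewClientFromEnvironment()
if err != nil {
return err
}
item, err := client.GetItemByTitle("my-item", "vault-uuid")
if err != nil {
return err
}
fmt.Println(item.GetValue("password"))
return nil
}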

View File

@@ -1,172 +0,0 @@
package connect
import (
"fmt"
"os"
"reflect"
"strconv"
"strings"
"github.com/1Password/connect-sdk-go/onepassword"
)
const (
vaultTag = "opvault"
itemTag = "opitem"
fieldTag = "opfield"
envVaultVar = "OP_VAULT"
)
type parsedItem struct {
vaultUUID string
itemTitle string
fields []*reflect.StructField
values []*reflect.Value
}
// Load Load configuration values based on struct tags
func Load(client Client, i interface{}) error {
configP := reflect.ValueOf(i)
if configP.Kind() != reflect.Ptr {
return fmt.Errorf("You must pass a pointer to Config struct")
}
config := configP.Elem()
if config.Kind() != reflect.Struct {
return fmt.Errorf("Config values can only be loaded into a struct")
}
t := config.Type()
// Multiple fields may be from a single item so we will collect them
items := map[string]parsedItem{}
// Fetch the Vault from the environment
vaultUUID, envVarFound := os.LookupEnv(envVaultVar)
for i := 0; i < t.NumField(); i++ {
value := config.Field(i)
field := t.Field(i)
tag := field.Tag.Get(itemTag)
if tag == "" {
continue
}
if !value.CanSet() {
return fmt.Errorf("Cannot load config into private fields")
}
itemVault, err := vaultUUIDForField(&field, vaultUUID, envVarFound)
if err != nil {
return err
}
key := fmt.Sprintf("%s/%s", itemVault, tag)
parsed := items[key]
parsed.vaultUUID = itemVault
parsed.itemTitle = tag
parsed.fields = append(parsed.fields, &field)
parsed.values = append(parsed.values, &value)
items[key] = parsed
}
for _, item := range items {
if err := setValuesForTag(client, &item); err != nil {
return err
}
}
return nil
}
func vaultUUIDForField(field *reflect.StructField, vaultUUID string, envVaultFound bool) (string, error) {
// Check to see if a specific vault has been specified on the field
// If the env vault id has not been found and item doesn't have a vault
// return an error
if vaultUUIDTag := field.Tag.Get(vaultTag); vaultUUIDTag == "" {
if !envVaultFound {
return "", fmt.Errorf("There is no vault for %q field", field.Name)
}
} else {
return vaultUUIDTag, nil
}
return vaultUUID, nil
}
func setValuesForTag(client Client, parsedItem *parsedItem) error {
item, err := client.GetItemByTitle(parsedItem.itemTitle, parsedItem.vaultUUID)
if err != nil {
return err
}
for i, field := range parsedItem.fields {
value := parsedItem.values[i]
path := field.Tag.Get(fieldTag)
if path == "" {
if field.Type == reflect.TypeOf(onepassword.Item{}) {
value.Set(reflect.ValueOf(*item))
continue // keep filling any remaining fields mapped to this item
}
return fmt.Errorf("There is no %q specified for %q", fieldTag, field.Name)
}
pathParts := strings.Split(path, ".")
if len(pathParts) != 2 {
return fmt.Errorf("Invalid field path format for %q", field.Name)
}
sectionID := sectionIDForName(pathParts[0], item.Sections)
label := pathParts[1]
for _, f := range item.Fields {
fieldSectionID := ""
if f.Section != nil {
fieldSectionID = f.Section.ID
}
if fieldSectionID == sectionID && f.Label == label {
if err := setValue(value, f.Value); err != nil {
return err
}
break
}
}
}
return nil
}
func setValue(value *reflect.Value, toSet string) error {
switch value.Kind() {
case reflect.String:
value.SetString(toSet)
case reflect.Int:
v, err := strconv.Atoi(toSet)
if err != nil {
return err
}
value.SetInt(int64(v))
default:
return fmt.Errorf("Unsupported type %q. Only string, int64, and onepassword.Item are supported", value.Kind())
}
return nil
}
func sectionIDForName(name string, sections []*onepassword.ItemSection) string {
if sections == nil {
return ""
}
for _, s := range sections {
if name == strings.ToLower(s.Label) {
return s.ID
}
}
return ""
}
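// exampleConfig and exampleLoad are an illustrative sketch (not part of the upstream file) of the
// struct-tag contract enforced by Load: exported fields, an opitem tag, an opvault tag (or the
// OP_VAULT environment variable), and an opfield path of the form
// "<lower-cased section label>.<field label>". The vault UUID, item title, and labels are placeholders.
type exampleConfig struct {
Host     string `opvault:"vault-uuid" opitem:"My Server" opfield:"details.hostname"`
Port     int    `opvault:"vault-uuid" opitem:"My Server" opfield:"details.port"`
Password string `opvault:"vault-uuid" opitem:"My Server" opfield:"details.password"`
}
func exampleLoad(client Client) (*exampleConfig, error) {
cfg := &exampleConfig{}
if err := Load(client, cfg); err != nil {
return nil, err
}
return cfg, nil
}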

View File

@@ -1,5 +0,0 @@
package connect
// SDKVersion is the latest Semantic Version of the library
// Do not rename this variable without changing the regex in the Makefile
const SDKVersion = "1.0.1"

View File

@@ -1,158 +0,0 @@
package onepassword
import (
"encoding/json"
"strings"
"time"
)
// ItemCategory Represents the template of the Item
type ItemCategory string
const (
Login ItemCategory = "LOGIN"
Password ItemCategory = "PASSWORD"
Server ItemCategory = "SERVER"
Database ItemCategory = "DATABASE"
CreditCard ItemCategory = "CREDIT_CARD"
Membership ItemCategory = "MEMBERSHIP"
Passport ItemCategory = "PASSPORT"
SoftwareLicense ItemCategory = "SOFTWARE_LICENSE"
OutdoorLicense ItemCategory = "OUTDOOR_LICENSE"
SecureNote ItemCategory = "SECURE_NOTE"
WirelessRouter ItemCategory = "WIRELESS_ROUTER"
BankAccount ItemCategory = "BANK_ACCOUNT"
DriverLicense ItemCategory = "DRIVER_LICENSE"
Identity ItemCategory = "IDENTITY"
RewardProgram ItemCategory = "REWARD_PROGRAM"
Document ItemCategory = "DOCUMENT"
EmailAccount ItemCategory = "EMAIL_ACCOUNT"
SocialSecurityNumber ItemCategory = "SOCIAL_SECURITY_NUMBER"
Custom ItemCategory = "CUSTOM"
)
// UnmarshalJSON Unmarshal Item Category enum strings to Go string enums
func (ic *ItemCategory) UnmarshalJSON(b []byte) error {
var s string
json.Unmarshal(b, &s)
category := ItemCategory(s)
switch category {
case Login, Password, Server, Database, CreditCard, Membership, Passport, SoftwareLicense,
OutdoorLicense, SecureNote, WirelessRouter, BankAccount, DriverLicense, Identity, RewardProgram,
Document, EmailAccount, SocialSecurityNumber:
*ic = category
default:
*ic = Custom
}
return nil
}
// Item represents an item returned to the consumer
type Item struct {
ID string `json:"id"`
Title string `json:"title"`
URLs []ItemURL `json:"urls,omitempty"`
Favorite bool `json:"favorite,omitempty"`
Tags []string `json:"tags,omitempty"`
Version int `json:"version,omitempty"`
Trashed bool `json:"trashed,omitempty"`
Vault ItemVault `json:"vault"`
Category ItemCategory `json:"category,omitempty"` // TODO: switch this to `category`
Sections []*ItemSection `json:"sections,omitempty"`
Fields []*ItemField `json:"fields,omitempty"`
LastEditedBy string `json:"lastEditedBy,omitempty"`
CreatedAt time.Time `json:"createdAt,omitempty"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
// ItemVault represents the Vault the Item is found in
type ItemVault struct {
ID string `json:"id"`
}
// ItemURL is a simplified item URL
type ItemURL struct {
Primary bool `json:"primary,omitempty"`
URL string `json:"href"`
}
// ItemSection Representation of a Section on an item
type ItemSection struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
}
// GeneratorRecipe Representation of a "recipe" used to generate a field
type GeneratorRecipe struct {
Length int `json:"length,omitempty"`
CharacterSets []string `json:"characterSets,omitempty"`
}
// ItemField Representation of a single field on an Item
type ItemField struct {
ID string `json:"id"`
Section *ItemSection `json:"section,omitempty"`
Type string `json:"type"`
Purpose string `json:"purpose,omitempty"`
Label string `json:"label,omitempty"`
Value string `json:"value,omitempty"`
Generate bool `json:"generate,omitempty"`
Recipe *GeneratorRecipe `json:"recipe,omitempty"`
Entropy float64 `json:"entropy,omitempty"`
}
// GetValue Retrieve the value of a field on the item by its label. To specify a
// field from a specific section pass in <section label>.<field label>. If
// no field matching the selector is found return "".
func (i *Item) GetValue(field string) string {
if i == nil || len(i.Fields) == 0 {
return ""
}
sectionFilter := false
sectionLabel := ""
fieldLabel := field
if strings.Contains(field, ".") {
parts := strings.Split(field, ".")
// Only treat this as <section label>.<field label> when there is exactly one separator
if len(parts) == 2 {
sectionFilter = true
sectionLabel = parts[0]
fieldLabel = parts[1]
}
}
for _, f := range i.Fields {
if sectionFilter {
if f.Section != nil {
if sectionLabel != i.SectionLabelForID(f.Section.ID) {
continue
}
}
}
if fieldLabel == f.Label {
return f.Value
}
}
return ""
}
func (i *Item) SectionLabelForID(id string) string {
if i != nil && len(i.Sections) > 0 {
for _, s := range i.Sections {
if s.ID == id {
return s.Label
}
}
}
return ""
}
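// exampleGetValue is an illustrative sketch (not part of the upstream file) of the two selector
// forms accepted by GetValue; the field and section labels are placeholders.
func exampleGetValue(item *Item) (username, apiKey string) {
// A top-level field, addressed by its label alone.
username = item.GetValue("username")
// A field inside a section, addressed as "<section label>.<field label>".
apiKey = item.GetValue("credentials.api key")
return username, apiKey
}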

View File

@@ -1,46 +0,0 @@
package onepassword
import (
"encoding/json"
"time"
)
// Vault represents a 1password Vault
type Vault struct {
ID string `json:"id"`
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
AttrVersion int `json:"attributeVersion,omitempty"`
ContentVersion int `json:"contentVersion,omitempty"`
Items int `json:"items,omitempty"`
Type VaultType `json:"type,omitempty"`
CreatedAt time.Time `json:"createdAt,omitempty"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
// VaultType Representation of what the Vault Type is
type VaultType string
const (
PersonalVault VaultType = "PERSONAL"
EveryoneVault VaultType = "EVERYONE"
TransferVault VaultType = "TRANSFER"
UserCreatedVault VaultType = "USER_CREATED"
UnknownVault VaultType = "UNKNOWN"
)
// UnmarshalJSON Unmarshal Vault Type enum strings to Go string enums
func (vt *VaultType) UnmarshalJSON(b []byte) error {
var s string
json.Unmarshal(b, &s)
vaultType := VaultType(s)
switch vaultType {
case PersonalVault, EveryoneVault, TransferVault, UserCreatedVault:
*vt = vaultType
default:
*vt = UnknownVault
}
return nil
}

View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,292 +0,0 @@
# Azure Active Directory authentication for Go
This is a standalone package for authenticating with Azure Active
Directory from other Go libraries and applications, in particular the [Azure SDK
for Go](https://github.com/Azure/azure-sdk-for-go).
Note: Despite the package's name it is not related to other "ADAL" libraries
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
trackers.
## Install
```bash
go get -u github.com/Azure/go-autorest/autorest/adal
```
## Usage
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
### Register an Azure AD Application with secret
1. Register a new application with a `secret` credential
```
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--password secret
```
2. Create a service principal using the `Application ID` from the previous step
```
az ad sp create --id "Application ID"
```
* Replace `Application ID` with `appId` from step 1.
### Register an Azure AD Application with certificate
1. Create a private key
```
openssl genrsa -out "example-app.key" 2048
```
2. Create the certificate
```
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
```
3. Create the PKCS#12 version of the certificate, which also contains the private key
```
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
```
4. Register a new application with the certificate content from `example-app.crt`
```
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--key-usage Verify --end-date 2018-01-01 \
--key-value "${certificateContents}"
```
5. Create a service principal using the `Application ID` from the previous step
```
az ad sp create --id "APPLICATION_ID"
```
* Replace `APPLICATION_ID` with `appId` from step 4.
### Grant the necessary permissions
Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
which can be assigned to the service principal of an Azure AD application depending on your needs.
```
az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
```
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
* Replace the `ROLE_NAME` with a role name of your choice.
It is also possible to define custom role definitions.
```
az role definition create --role-definition role-definition.json
```
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
### Acquire Access Token
The common configuration used by all flows:
```Go
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
tenantID := "TENANT_ID"
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
if err != nil {
    return nil, err
}
applicationID := "APPLICATION_ID"
callback := func(token adal.Token) error {
    // This is called after the token is acquired or refreshed
    return nil
}
// The resource for which the token is acquired
resource := "https://management.core.windows.net/"
```
* Replace the `TENANT_ID` with your tenant ID.
* Replace the `APPLICATION_ID` with the value from the previous section.
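If refreshed tokens should survive process restarts, the callback shown above is a natural place to persist them. A minimal sketch, assuming the `adal.SaveToken` helper from this package and an illustrative cache path:
```Go
// Persist every acquired or refreshed token to a local cache file (path is illustrative).
callback := func(token adal.Token) error {
    return adal.SaveToken("/home/user/.adal/accessToken.json", 0600, token)
}
```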
#### Client Credentials
```Go
applicationSecret := "APPLICATION_SECRET"

spt, err := adal.NewServicePrincipalToken(
    *oauthConfig,
    applicationID,
    applicationSecret,
    resource,
    callbacks...)
if err != nil {
    return nil, err
}

// Acquire a new access token
err = spt.Refresh()
if err == nil {
    token := spt.Token
}
```
* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
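Once acquired, the token is typically attached to outgoing requests. A minimal sketch of how the `spt` value above might be used, assuming the `autorest` package from this repository, `net/http`, and an illustrative management endpoint:
```Go
// Authorize an autorest-based client with the service principal token.
client := autorest.NewClientWithUserAgent("example-app")
client.Authorizer = autorest.NewBearerAuthorizer(spt)

// Or attach the raw token to a plain net/http request (request error ignored for brevity).
req, _ := http.NewRequest(http.MethodGet, "https://management.azure.com/subscriptions?api-version=2020-01-01", nil)
req.Header.Set("Authorization", "Bearer "+spt.OAuthToken())
```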
#### Client Certificate
```Go
certificatePath := "./example-app.pfx"

certData, err := ioutil.ReadFile(certificatePath)
if err != nil {
    return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
}

// Get the certificate and private key from the pfx file
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
if err != nil {
    return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}

spt, err := adal.NewServicePrincipalTokenFromCertificate(
    *oauthConfig,
    applicationID,
    certificate,
    rsaPrivateKey,
    resource,
    callbacks...)
if err != nil {
    return nil, err
}

// Acquire a new access token
err = spt.Refresh()
if err == nil {
    token := spt.Token
}
```
* Update the certificate path to point to the example-app.pfx file that was created in the previous section.
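The `decodePkcs12` helper used above is not part of this package. A minimal sketch of such a helper, assuming the `golang.org/x/crypto/pkcs12` package:
```Go
import (
    "crypto/rsa"
    "crypto/x509"
    "fmt"

    "golang.org/x/crypto/pkcs12"
)

// decodePkcs12 extracts the certificate and RSA private key from PKCS#12 (.pfx) data.
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
    privateKey, certificate, err := pkcs12.Decode(pkcs, password)
    if err != nil {
        return nil, nil, err
    }
    rsaPrivateKey, ok := privateKey.(*rsa.PrivateKey)
    if !ok {
        return nil, nil, fmt.Errorf("pkcs12 certificate must contain an RSA private key")
    }
    return certificate, rsaPrivateKey, nil
}
```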
#### Device Code
```Go
oauthClient := &http.Client{}
// Acquire the device code
deviceCode, err := adal.InitiateDeviceAuth(
oauthClient,
*oauthConfig,
applicationID,
resource)
if err != nil {
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
}
// Display the authentication message
fmt.Println(*deviceCode.Message)
// Wait here until the user is authenticated
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
if err != nil {
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
*oauthConfig,
applicationID,
resource,
*token,
callbacks...)
if err == nil {
token := spt.Token
}
```
#### Username and password authentication
```Go
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
*oauthConfig,
applicationID,
username,
password,
resource,
callbacks...)
if err == nil {
token := spt.Token
}
```
#### Authorization code authentication
```Go
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
*oauthConfig,
applicationID,
clientSecret,
authorizationCode,
redirectURI,
resource,
callbacks...)
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
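The `authorizationCode` must first be obtained by sending the user through the authorization endpoint. A minimal sketch of building that URL from the `oauthConfig` defined earlier (the `redirectURI` and `state` values are illustrative):
```Go
// Build the URL the user must visit to grant consent and obtain an authorization code.
authorizeURL := oauthConfig.AuthorizeEndpoint
query := authorizeURL.Query()
query.Set("client_id", applicationID)
query.Set("response_type", "code")
query.Set("redirect_uri", redirectURI)
query.Set("resource", resource)
query.Set("state", "SOME_OPAQUE_STATE")
authorizeURL.RawQuery = query.Encode()
fmt.Println("Sign in at:", authorizeURL.String())
```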
### Command Line Tool
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
```
adal -h
Usage of ./adal:
-applicationId string
application id
-certificatePath string
path to pk12/PFC application certificate
-mode string
authentication mode (device, secret, cert, refresh) (default "device")
-resource string
resource for which the token is requested
-secret string
application secret
-tenantId string
tenant id
-tokenCachePath string
location of oath token cache (default "/home/cgc/.adal/accessToken.json")
```
Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
```
adal -mode device \
-applicationId "APPLICATION_ID" \
-tenantId "TENANT_ID" \
-resource https://management.core.windows.net/
```


@@ -1,151 +0,0 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
)
const (
activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
)
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
AuthorityEndpoint url.URL `json:"authorityEndpoint"`
AuthorizeEndpoint url.URL `json:"authorizeEndpoint"`
TokenEndpoint url.URL `json:"tokenEndpoint"`
DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
}
// IsZero returns true if the OAuthConfig object is zero-initialized.
func (oac OAuthConfig) IsZero() bool {
return oac == OAuthConfig{}
}
func validateStringParam(param, name string) error {
if len(param) == 0 {
return fmt.Errorf("parameter '" + name + "' cannot be empty")
}
return nil
}
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
apiVer := "1.0"
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
}
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
return nil, err
}
api := ""
// it's legal for tenantID to be empty so don't validate it
if apiVersion != nil {
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
return nil, err
}
api = fmt.Sprintf("?api-version=%s", *apiVersion)
}
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
}
authorityURL, err := u.Parse(tenantID)
if err != nil {
return nil, err
}
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
if err != nil {
return nil, err
}
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
if err != nil {
return nil, err
}
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
if err != nil {
return nil, err
}
return &OAuthConfig{
AuthorityEndpoint: *authorityURL,
AuthorizeEndpoint: *authorizeURL,
TokenEndpoint: *tokenURL,
DeviceCodeEndpoint: *deviceCodeURL,
}, nil
}
// MultiTenantOAuthConfig provides endpoints for the primary and auxiliary tenant IDs.
type MultiTenantOAuthConfig interface {
PrimaryTenant() *OAuthConfig
AuxiliaryTenants() []*OAuthConfig
}
// OAuthOptions contains optional OAuthConfig creation arguments.
type OAuthOptions struct {
APIVersion string
}
func (c OAuthOptions) apiVersion() string {
if c.APIVersion != "" {
return fmt.Sprintf("?api-version=%s", c.APIVersion)
}
return "1.0"
}
// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
return nil, errors.New("must specify one to three auxiliary tenants")
}
mtCfg := multiTenantOAuthConfig{
cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
}
apiVer := options.apiVersion()
pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
if err != nil {
return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
}
mtCfg.cfgs[0] = pri
for i := range auxiliaryTenantIDs {
aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
if err != nil {
return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
}
mtCfg.cfgs[i+1] = aux
}
return mtCfg, nil
}
type multiTenantOAuthConfig struct {
// first config in the slice is the primary tenant
cfgs []*OAuthConfig
}
func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
return m.cfgs[0]
}
func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
return m.cfgs[1:]
}

Some files were not shown because too many files have changed in this diff.