Refactor kube package to use the controller-runtime Go client to interact with the cluster
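The refactor replaces kubectl shell-outs in the e2e helpers with the controller-runtime client. For orientation, here is a minimal, hedged sketch of the client pattern the helpers below build on; loading the config via ctrl.GetConfigOrDie and the hard-coded namespace and name are illustrative assumptions, the diff itself builds its client inside NewKubeClient:

package main

import (
    "context"
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
    // Register the types we want to read/write with the client.
    scheme := runtime.NewScheme()
    utilruntime.Must(appsv1.AddToScheme(scheme))

    // Build a client from the current kubeconfig (assumption for this sketch).
    c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
    if err != nil {
        panic(err)
    }

    // Typed Get, mirroring what Deployment.Get does in the diff below.
    var dep appsv1.Deployment
    key := client.ObjectKey{Namespace: "default", Name: "onepassword-connect-operator"}
    if err := c.Get(context.Background(), key, &dep); err != nil {
        panic(err)
    }
    fmt.Println("available replicas:", dep.Status.AvailableReplicas)
}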
@@ -2,14 +2,18 @@ package kube

import (
    "context"
    "fmt"
    "time"

    "sigs.k8s.io/controller-runtime/pkg/client"

    //nolint:staticcheck // ST1001
    . "github.com/onsi/ginkgo/v2"
    //nolint:staticcheck // ST1001
    . "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/1Password/onepassword-operator/pkg/testhelper/defaults"
)

type Deployment struct {
@@ -18,9 +22,7 @@ type Deployment struct {
    name string
}

func (d *Deployment) ReadEnvVar(ctx context.Context, envVarName string) string {
    By("Reading " + envVarName + " value from deployment/" + d.name)

func (d *Deployment) Get(ctx context.Context) *appsv1.Deployment {
    // Derive a short-lived context so this API call won't hang indefinitely.
    c, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()
@@ -29,6 +31,13 @@ func (d *Deployment) ReadEnvVar(ctx context.Context, envVarName string) string {
    err := d.client.Get(c, client.ObjectKey{Name: d.name, Namespace: d.config.Namespace}, deployment)
    Expect(err).ToNot(HaveOccurred())

    return deployment
}

func (d *Deployment) ReadEnvVar(ctx context.Context, envVarName string) string {
    By("Reading " + envVarName + " value from deployment/" + d.name)
    deployment := d.Get(ctx)

    // Search env across all containers
    found := ""
    for _, container := range deployment.Spec.Template.Spec.Containers {
@@ -43,3 +52,81 @@ func (d *Deployment) ReadEnvVar(ctx context.Context, envVarName string) string {
    Expect(found).NotTo(BeEmpty())
    return found
}

func (d *Deployment) PatchEnvVars(ctx context.Context, upsert []corev1.EnvVar, remove []string) {
    By("Patching env variables for deployment/" + d.name)
    deployment := d.Get(ctx)
    deploymentCopy := deployment.DeepCopy()
    container := &deployment.Spec.Template.Spec.Containers[0]

    // Build removal set for quick lookup
    toRemove := make(map[string]struct{}, len(remove))
    for _, n := range remove {
        toRemove[n] = struct{}{}
    }

    // Build upsert map for quick lookup
    upserts := make(map[string]corev1.EnvVar, len(upsert))
    for _, e := range upsert {
        upserts[e.Name] = e
    }

    // Filter existing envs: keep if not in remove and not being upserted
    filtered := make([]corev1.EnvVar, 0, len(container.Env))
    for _, e := range container.Env {
        if _, ok := toRemove[e.Name]; ok {
            continue
        }
        if newE, ok := upserts[e.Name]; ok {
            filtered = append(filtered, newE) // replace existing
            delete(upserts, e.Name)           // remove from map so it isn't appended again below
        } else {
            filtered = append(filtered, e)
        }
    }

    // Append any new envs that weren't already in the container
    for _, e := range upserts {
        filtered = append(filtered, e)
    }

    container.Env = filtered

    // Derive a short-lived context so this API call won't hang indefinitely.
    c, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()

    err := d.client.Patch(c, deployment, client.MergeFrom(deploymentCopy))
    Expect(err).ToNot(HaveOccurred())

    // Wait for the new deployment to roll out.
    d.WaitDeploymentRolledOut(ctx)
}

// WaitDeploymentRolledOut waits for the deployment to finish a rollout.
func (d *Deployment) WaitDeploymentRolledOut(ctx context.Context) {
    By("Waiting for deployment/" + d.name + " to roll out")

    deployment := d.Get(ctx)
    targetGen := deployment.Generation

    Eventually(func(g Gomega) error {
        newDeployment := d.Get(ctx)
        // Has the controller observed the new spec yet?
        if newDeployment.Status.ObservedGeneration < targetGen {
            return fmt.Errorf("observedGeneration %d < desired %d", newDeployment.Status.ObservedGeneration, targetGen)
        }
        g.Expect(newDeployment.Status.ObservedGeneration).To(BeNumerically(">=", targetGen))

        desired := int32(1)
        if newDeployment.Spec.Replicas != nil {
            desired = *newDeployment.Spec.Replicas
        }

        g.Expect(newDeployment.Status.UpdatedReplicas).To(Equal(desired))
        g.Expect(newDeployment.Status.AvailableReplicas).To(Equal(desired))
        g.Expect(newDeployment.Status.Replicas).To(Equal(desired))

        return nil
    }, defaults.E2ETimeout, defaults.E2EInterval).Should(Succeed())
}
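A hedged usage sketch of the Deployment helpers above from a Ginkgo spec; the deployment name matches the one used elsewhere in this diff, while the spec structure, env var values, and the k helper (a *Kube from NewKubeClient) are illustrative assumptions:

var _ = Describe("operator env vars (illustrative)", func() {
    It("upserts and removes env vars, then waits for the rollout", func(ctx SpecContext) {
        // k is assumed to be a *Kube created with NewKubeClient in suite setup.
        d := k.Deployment("onepassword-connect-operator")

        d.PatchEnvVars(ctx,
            []corev1.EnvVar{{Name: "AUTO_RESTART", Value: "true"}}, // upsert
            []string{"OP_CONNECT_TOKEN"},                           // remove
        )

        // PatchEnvVars already waited for the rollout, so the new value should be visible.
        Expect(d.ReadEnvVar(ctx, "AUTO_RESTART")).To(Equal("true"))
    })
})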
@@ -2,35 +2,25 @@ package kube

import (
    "context"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"

    //"encoding/base64"
    "fmt"
    "k8s.io/apimachinery/pkg/api/errors"
    //"k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/clientcmd"
    "os"
    "path/filepath"
    "sigs.k8s.io/yaml"
    "strconv"
    "strings"
    "time"

    //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    //"k8s.io/client-go/kubernetes"
    //"k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    //nolint:staticcheck // ST1001
    . "github.com/onsi/ginkgo/v2"
    //nolint:staticcheck // ST1001
    . "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/client-go/tools/clientcmd"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/yaml"

    //"github.com/1Password/onepassword-operator/pkg/testhelper/defaults"
    apiv1 "github.com/1Password/onepassword-operator/api/v1"
    "github.com/1Password/onepassword-operator/pkg/testhelper/system"
)

type ClusterConfig struct {
@@ -61,6 +51,21 @@ func NewKubeClient(clusterConfig *ClusterConfig) *Kube {
    kubernetesClient, err := client.New(restConfig, client.Options{Scheme: scheme})
    Expect(err).NotTo(HaveOccurred())

    // Update the current context's namespace in the kubeconfig.
    pathOpts := clientcmd.NewDefaultPathOptions()
    cfg, err := pathOpts.GetStartingConfig()
    Expect(err).NotTo(HaveOccurred())

    currentContext := cfg.CurrentContext
    Expect(currentContext).NotTo(BeEmpty(), "no current kube context is set in kubeconfig")

    ctx, ok := cfg.Contexts[currentContext]
    Expect(ok).To(BeTrue(), fmt.Sprintf("current context %q not found in kubeconfig", currentContext))

    ctx.Namespace = clusterConfig.Namespace
    err = clientcmd.ModifyConfig(pathOpts, *cfg, true)
    Expect(err).NotTo(HaveOccurred())

    return &Kube{
        Config: clusterConfig,
        Client: kubernetesClient,
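A minimal sketch of constructing this helper in suite setup; only the Namespace field of ClusterConfig is visible in this diff, so anything else here is an assumption:

var k *Kube

var _ = BeforeSuite(func() {
    // "default" mirrors the WATCH_NAMESPACE value used elsewhere in this diff.
    k = NewKubeClient(&ClusterConfig{Namespace: "default"})
})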
@@ -83,6 +88,14 @@ func (k *Kube) Deployment(name string) *Deployment {
    }
}

func (k *Kube) Pod(selector map[string]string) *Pod {
    return &Pod{
        client:   k.Client,
        config:   k.Config,
        selector: selector,
    }
}

// ApplyOnePasswordItem applies a OnePasswordItem manifest.
func (k *Kube) ApplyOnePasswordItem(ctx context.Context, fileName string) {
    By("Applying " + fileName)
@@ -110,147 +123,3 @@ func (k *Kube) ApplyOnePasswordItem(ctx context.Context, fileName string) {
    }
    Expect(err).NotTo(HaveOccurred())
}

func RestartDeployment(name string) (string, error) {
    return system.Run("kubectl", "rollout", "status", name, "--timeout=120s")
}

func GetPodNameBySelector(selector string) (string, error) {
    return system.Run("kubectl", "get", "pods", "-l", selector, "-o", "jsonpath={.items[0].metadata.name}")
}

func CountOperatorReplicaSets() int {
    By("Counting operator replicasets")
    countStr, err := system.Run(
        "kubectl", "get", "rs",
        "-l", "name=onepassword-connect-operator",
        "-o", "jsonpath={.items[*].metadata.name}",
    )
    Expect(err).NotTo(HaveOccurred())

    fields := strings.Fields(countStr)
    replicaSetCount := len(fields)

    return replicaSetCount
}

// PatchOperatorToUseServiceAccount sets the `OP_SERVICE_ACCOUNT_TOKEN` env variable
//func (s *Kube) PatchOperatorToUseServiceAccount(ctx context.Context) {
//    By("Patching the operator deployment with service account token")
//
//    // Derive a short-lived context so this API call won't hang indefinitely.
//    c, cancel := context.WithTimeout(ctx, 10*time.Second)
//    defer cancel()
//
//    secret, err := s.ClientSet.CoreV1().Secrets(s.Namespace).Get(c, "onepassword-service-account-token", metav1.GetOptions{})
//    Expect(err).NotTo(HaveOccurred())
//
//    rawServiceAccountToken, ok := secret.Data["token"]
//    Expect(ok).To(BeTrue())
//
//    serviceAccountToken, err := base64.StdEncoding.DecodeString(string(rawServiceAccountToken))
//    Expect(err).NotTo(HaveOccurred())
//
//    deployment, err := s.ClientSet.AppsV1().
//        Deployments(s.Namespace).
//        Get(c, "onepassword-connect-operator", metav1.GetOptions{})
//    Expect(err).NotTo(HaveOccurred())
//
//    container := &deployment.Spec.Template.Spec.Containers[0]
//
//    withOperatorRestart[struct{}](func(_ struct{}) {
//        _, err = system.Run(
//            "kubectl", "set", "env", "deployment/onepassword-connect-operator",
//            "OP_SERVICE_ACCOUNT_TOKEN="+string(serviceAccountToken),
//            "OP_CONNECT_HOST-",     // remove
//            "OP_CONNECT_TOKEN-",    // remove
//            "MANAGE_CONNECT=false", // ensure operator doesn't try to manage Connect
//        )
//        Expect(err).NotTo(HaveOccurred())
//    })
//}

// SetContextNamespace sets the current kubernetes context namespace
func SetContextNamespace(namespace string) {
    By("Set namespace to " + namespace)
    _, err := system.Run("kubectl", "config", "set-context", "--current", "--namespace="+namespace)
    Expect(err).NotTo(HaveOccurred())
}

// PatchOperatorToAutoRestart sets the `AUTO_RESTART` env variable
var PatchOperatorToAutoRestart = withOperatorRestart[bool](func(value bool) {
    By("patching the operator to enable AUTO_RESTART")
    _, err := system.Run(
        "kubectl", "set", "env", "deployment/onepassword-connect-operator",
        "AUTO_RESTART="+strconv.FormatBool(value),
    )
    Expect(err).NotTo(HaveOccurred())
})

// PatchOperatorWithCustomSecret sets a new env variable CUSTOM_SECRET
var PatchOperatorWithCustomSecret = withOperatorRestart[map[string]string](func(secret map[string]string) {
    By("patching the operator with custom secret and AUTO_RESTART=true")
    _, err := system.Run(
        "kubectl", "patch", "deployment", "onepassword-connect-operator",
        "--type=json",
        fmt.Sprintf(`-p=[{"op":"replace","path":"/spec/template/spec/containers/0/env","value":[
            {"name":"OPERATOR_NAME","value":"onepassword-connect-operator"},
            {"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},
            {"name":"WATCH_NAMESPACE","value":"default"},
            {"name":"POLLING_INTERVAL","value":"10"},
            {"name":"MANAGE_CONNECT","value":"true"},
            {"name":"AUTO_RESTART","value":"true"},
            {"name":"OP_CONNECT_HOST","value":"http://onepassword-connect:8080"},
            {
                "name":"OP_CONNECT_TOKEN",
                "valueFrom":{
                    "secretKeyRef":{
                        "name":"onepassword-token",
                        "key":"token"
                    }
                }
            },
            {
                "name":"CUSTOM_SECRET",
                "valueFrom":{
                    "secretKeyRef":{
                        "name":"%s",
                        "key":"%s"
                    }
                }
            }
        ]}]`, secret["name"], secret["key"]),
    )
    Expect(err).NotTo(HaveOccurred())
})

// withOperatorRestart is a helper that runs the given operation and then restarts the operator deployment
func withOperatorRestart[T any](operation func(arg T)) func(arg T) {
    return func(arg T) {
        operation(arg)

        _, err := RestartDeployment("deployment/onepassword-connect-operator")
        Expect(err).NotTo(HaveOccurred())

        By("Waiting for the operator pod to be 'Running'")
        Eventually(func(g Gomega) {
            output, err := system.Run("kubectl", "get", "pods",
                "-l", "name=onepassword-connect-operator",
                "-o", "jsonpath={.items[0].status.phase}")
            g.Expect(err).NotTo(HaveOccurred())
            g.Expect(output).To(ContainSubstring("Running"))
        }, 120*time.Second, 1*time.Second).Should(Succeed())
    }
}

// readPullingInterval reads the POLLING_INTERVAL env variable from the operator deployment
// and returns the polling interval in seconds as a string.
func readPullingInterval() string {
    output, err := system.Run(
        "kubectl", "get", "deployment", "onepassword-connect-operator",
        "-o", "jsonpath={.spec.template.spec.containers[0].env[?(@.name==\"POLLING_INTERVAL\")].value}",
    )
    Expect(err).NotTo(HaveOccurred())

    return output
}
pkg/testhelper/kube/pod.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package kube

import (
    "context"
    "time"

    //nolint:staticcheck // ST1001
    . "github.com/onsi/ginkgo/v2"
    //nolint:staticcheck // ST1001
    . "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/1Password/onepassword-operator/pkg/testhelper/defaults"
)

type Pod struct {
    client   client.Client
    config   *ClusterConfig
    selector map[string]string
}

func (p *Pod) WaitingForRunningPod(ctx context.Context) {
    By("Waiting for the pod " + labels.Set(p.selector).String() + " to be 'Running'")

    Eventually(func(g Gomega) {
        // short per-attempt timeout to avoid hanging calls while Eventually polls
        attemptCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
        defer cancel()

        var pods corev1.PodList
        listOpts := []client.ListOption{
            client.InNamespace(p.config.Namespace),
            client.MatchingLabels(p.selector),
        }
        g.Expect(p.client.List(attemptCtx, &pods, listOpts...)).To(Succeed())
        g.Expect(pods.Items).NotTo(BeEmpty(), "no pods found with selector %q", labels.Set(p.selector).String())

        foundRunning := false
        for _, pod := range pods.Items {
            if pod.Status.Phase == corev1.PodRunning {
                foundRunning = true
                break
            }
        }
        g.Expect(foundRunning).To(BeTrue(), "pod not Running yet")
    }, defaults.E2ETimeout, defaults.E2EInterval).Should(Succeed())
}
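A hedged call-site sketch for the new Pod helper; the label selector mirrors the name=onepassword-connect-operator selector used with kubectl elsewhere in this diff, and k is assumed to be the *Kube helper from NewKubeClient:

k.Pod(map[string]string{"name": "onepassword-connect-operator"}).WaitingForRunningPod(ctx)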
@@ -7,12 +7,12 @@ import (
    "path/filepath"
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    //nolint:staticcheck // ST1001
    . "github.com/onsi/ginkgo/v2"
    //nolint:staticcheck // ST1001
    . "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/1Password/onepassword-operator/pkg/testhelper/defaults"
@@ -1,8 +1,6 @@
package operator

import (
    "time"

    //nolint:staticcheck // ST1001
    . "github.com/onsi/ginkgo/v2"
    //nolint:staticcheck // ST1001

@@ -27,27 +25,3 @@ func DeployOperator() {
    _, err := system.Run("make", "deploy")
    Expect(err).NotTo(HaveOccurred())
}

// WaitingForOperatorPod waits for the Operator pod to be in 'Running' state
func WaitingForOperatorPod() {
    By("Waiting for the Operator pod to be 'Running'")
    Eventually(func(g Gomega) {
        output, err := system.Run("kubectl", "get", "pods",
            "-l", "name=onepassword-connect-operator",
            "-o", "jsonpath={.items[0].status.phase}")
        g.Expect(err).NotTo(HaveOccurred())
        g.Expect(output).To(ContainSubstring("Running"))
    }, 30*time.Second, 1*time.Second).Should(Succeed())
}

// WaitingForConnectPod waits for the Connect pod to be in 'Running' state
func WaitingForConnectPod() {
    By("Waiting for the Connect pod to be 'Running'")
    Eventually(func(g Gomega) {
        output, err := system.Run("kubectl", "get", "pods",
            "-l", "app=onepassword-connect",
            "-o", "jsonpath={.items[0].status.phase}")
        g.Expect(err).NotTo(HaveOccurred())
        g.Expect(output).To(ContainSubstring("Running"))
    }, 30*time.Second, 1*time.Second).Should(Succeed())
}