Update packages and add vendor directory

Eddy Filip
2022-09-13 16:24:52 +03:00
parent 23b66f73af
commit 1d75f78891
3906 changed files with 1365387 additions and 465 deletions


@@ -0,0 +1,333 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder
import (
"fmt"
"strings"
"github.com/go-logr/logr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// Support mocking out functions for testing.
var newController = controller.New
var getGvk = apiutil.GVKForObject
// project represents other forms that we can use to
// send/receive a given resource (metadata-only, unstructured, etc).
type objectProjection int
const (
// projectAsNormal doesn't change the object from the form given.
projectAsNormal objectProjection = iota
// projectAsMetadata turns this into a metadata-only watch.
projectAsMetadata
)
// Builder builds a Controller.
type Builder struct {
forInput ForInput
ownsInput []OwnsInput
watchesInput []WatchesInput
mgr manager.Manager
globalPredicates []predicate.Predicate
ctrl controller.Controller
ctrlOptions controller.Options
name string
}
// ControllerManagedBy returns a new controller builder that will be started by the provided Manager.
func ControllerManagedBy(m manager.Manager) *Builder {
return &Builder{mgr: m}
}
// ForInput represents the information set by the For method.
type ForInput struct {
object client.Object
predicates []predicate.Predicate
objectProjection objectProjection
err error
}
// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete /
// update events by *reconciling the object*.
// This is the equivalent of calling
// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}).
func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder {
if blder.forInput.object != nil {
blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation")
return blder
}
input := ForInput{object: object}
for _, opt := range opts {
opt.ApplyToFor(&input)
}
blder.forInput = input
return blder
}
// OwnsInput represents the information set by the Owns method.
type OwnsInput struct {
object client.Object
predicates []predicate.Predicate
objectProjection objectProjection
}
// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to
// create / delete / update events by *reconciling the owner object*. This is the equivalent of calling
// Watches(&source.Kind{Type: <ForType-forInput>}, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}).
func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder {
input := OwnsInput{object: object}
for _, opt := range opts {
opt.ApplyToOwns(&input)
}
blder.ownsInput = append(blder.ownsInput, input)
return blder
}
// WatchesInput represents the information set by the Watches method.
type WatchesInput struct {
src source.Source
eventhandler handler.EventHandler
predicates []predicate.Predicate
objectProjection objectProjection
}
// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using
// Owns or For instead of Watches directly.
// Specified predicates are registered only for the given source.
func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder {
input := WatchesInput{src: src, eventhandler: eventhandler}
for _, opt := range opts {
opt.ApplyToWatches(&input)
}
blder.watchesInput = append(blder.watchesInput, input)
return blder
}
// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually
// trigger reconciliations. For example, filtering on whether the resource version has changed.
// The given predicate is added for all watched objects.
// Defaults to the empty list.
func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder {
blder.globalPredicates = append(blder.globalPredicates, p)
return blder
}
// WithOptions overrides the controller options used in doController. Defaults to empty.
func (blder *Builder) WithOptions(options controller.Options) *Builder {
blder.ctrlOptions = options
return blder
}
// WithLogConstructor overrides the controller options' LogConstructor.
func (blder *Builder) WithLogConstructor(logConstructor func(*reconcile.Request) logr.Logger) *Builder {
blder.ctrlOptions.LogConstructor = logConstructor
return blder
}
// Named sets the name of the controller to the given name. The name shows up
// in metrics, among other things, and thus should be a prometheus compatible name
// (underscores and alphanumeric characters only).
//
// By default, controllers are named using the lowercase version of their kind.
func (blder *Builder) Named(name string) *Builder {
blder.name = name
return blder
}
// Complete builds the Application Controller.
func (blder *Builder) Complete(r reconcile.Reconciler) error {
_, err := blder.Build(r)
return err
}
// Build builds the Application Controller and returns the Controller it created.
func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, error) {
if r == nil {
return nil, fmt.Errorf("must provide a non-nil Reconciler")
}
if blder.mgr == nil {
return nil, fmt.Errorf("must provide a non-nil Manager")
}
if blder.forInput.err != nil {
return nil, blder.forInput.err
}
// Check that the reconcile type exists
if blder.forInput.object == nil {
return nil, fmt.Errorf("must provide an object for reconciliation")
}
// Set the ControllerManagedBy
if err := blder.doController(r); err != nil {
return nil, err
}
// Set the Watch
if err := blder.doWatch(); err != nil {
return nil, err
}
return blder.ctrl, nil
}
func (blder *Builder) project(obj client.Object, proj objectProjection) (client.Object, error) {
switch proj {
case projectAsNormal:
return obj, nil
case projectAsMetadata:
metaObj := &metav1.PartialObjectMetadata{}
gvk, err := getGvk(obj, blder.mgr.GetScheme())
if err != nil {
return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err)
}
metaObj.SetGroupVersionKind(gvk)
return metaObj, nil
default:
panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj))
}
}
func (blder *Builder) doWatch() error {
// Reconcile type
typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection)
if err != nil {
return err
}
src := &source.Kind{Type: typeForSrc}
hdler := &handler.EnqueueRequestForObject{}
allPredicates := append(blder.globalPredicates, blder.forInput.predicates...)
if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil {
return err
}
// Watches the managed types
for _, own := range blder.ownsInput {
typeForSrc, err := blder.project(own.object, own.objectProjection)
if err != nil {
return err
}
src := &source.Kind{Type: typeForSrc}
hdler := &handler.EnqueueRequestForOwner{
OwnerType: blder.forInput.object,
IsController: true,
}
allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...)
allPredicates = append(allPredicates, own.predicates...)
if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil {
return err
}
}
// Do the watch requests
for _, w := range blder.watchesInput {
allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...)
allPredicates = append(allPredicates, w.predicates...)
// If the source of this watch is of type *source.Kind, project it.
if srckind, ok := w.src.(*source.Kind); ok {
typeForSrc, err := blder.project(srckind.Type, w.objectProjection)
if err != nil {
return err
}
srckind.Type = typeForSrc
}
if err := blder.ctrl.Watch(w.src, w.eventhandler, allPredicates...); err != nil {
return err
}
}
return nil
}
func (blder *Builder) getControllerName(gvk schema.GroupVersionKind) string {
if blder.name != "" {
return blder.name
}
return strings.ToLower(gvk.Kind)
}
func (blder *Builder) doController(r reconcile.Reconciler) error {
globalOpts := blder.mgr.GetControllerOptions()
ctrlOptions := blder.ctrlOptions
if ctrlOptions.Reconciler == nil {
ctrlOptions.Reconciler = r
}
// Retrieve the GVK from the object we're reconciling
// to prepopulate logger information, and to optionally generate a default name.
gvk, err := getGvk(blder.forInput.object, blder.mgr.GetScheme())
if err != nil {
return err
}
// Setup concurrency.
if ctrlOptions.MaxConcurrentReconciles == 0 {
groupKind := gvk.GroupKind().String()
if concurrency, ok := globalOpts.GroupKindConcurrency[groupKind]; ok && concurrency > 0 {
ctrlOptions.MaxConcurrentReconciles = concurrency
}
}
// Setup cache sync timeout.
if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout != nil {
ctrlOptions.CacheSyncTimeout = *globalOpts.CacheSyncTimeout
}
controllerName := blder.getControllerName(gvk)
// Setup the logger.
if ctrlOptions.LogConstructor == nil {
log := blder.mgr.GetLogger().WithValues(
"controller", controllerName,
"controllerGroup", gvk.Group,
"controllerKind", gvk.Kind,
)
ctrlOptions.LogConstructor = func(req *reconcile.Request) logr.Logger {
log := log
if req != nil {
log = log.WithValues(
gvk.Kind, klog.KRef(req.Namespace, req.Name),
"namespace", req.Namespace, "name", req.Name,
)
}
return log
}
}
// Build the controller and return.
blder.ctrl, err = newController(controllerName, blder.mgr, ctrlOptions)
return err
}
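
A minimal usage sketch (not part of this commit) of how the builder above is typically wired up; the ReplicaSet/Pod kinds and the reconciler are illustrative assumptions, and ctrl.NewControllerManagedBy is the root-package alias for ControllerManagedBy.

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupController registers a controller that reconciles ReplicaSets and
// re-enqueues the owning ReplicaSet whenever one of its Pods changes.
func setupController(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.ReplicaSet{}). // the type being reconciled
		Owns(&corev1.Pod{}).       // children generated by the reconciler
		Named("replicaset").       // Prometheus-compatible controller name
		Complete(r)                // calls Build and registers the controller
}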


@@ -0,0 +1,28 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package builder wraps other controller-runtime libraries and exposes simple
// patterns for building common Controllers.
//
// Projects built with the builder package can trivially be rebased on top of the underlying
// packages if the project requires more customized behavior in the future.
package builder
import (
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
var log = logf.RuntimeLog.WithName("builder")


@@ -0,0 +1,140 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder
import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
// {{{ "Functional" Option Interfaces
// ForOption is some configuration that modifies options for a For request.
type ForOption interface {
// ApplyToFor applies this configuration to the given for input.
ApplyToFor(*ForInput)
}
// OwnsOption is some configuration that modifies options for an Owns request.
type OwnsOption interface {
// ApplyToOwns applies this configuration to the given owns input.
ApplyToOwns(*OwnsInput)
}
// WatchesOption is some configuration that modifies options for a watches request.
type WatchesOption interface {
// ApplyToWatches applies this configuration to the given watches options.
ApplyToWatches(*WatchesInput)
}
// }}}
// {{{ Multi-Type Options
// WithPredicates sets the given predicates list.
func WithPredicates(predicates ...predicate.Predicate) Predicates {
return Predicates{
predicates: predicates,
}
}
// Predicates filters events before enqueuing the keys.
type Predicates struct {
predicates []predicate.Predicate
}
// ApplyToFor applies this configuration to the given ForInput options.
func (w Predicates) ApplyToFor(opts *ForInput) {
opts.predicates = w.predicates
}
// ApplyToOwns applies this configuration to the given OwnsInput options.
func (w Predicates) ApplyToOwns(opts *OwnsInput) {
opts.predicates = w.predicates
}
// ApplyToWatches applies this configuration to the given WatchesInput options.
func (w Predicates) ApplyToWatches(opts *WatchesInput) {
opts.predicates = w.predicates
}
var _ ForOption = &Predicates{}
var _ OwnsOption = &Predicates{}
var _ WatchesOption = &Predicates{}
// }}}
// {{{ For & Owns Dual-Type options
// projectAs configures the projection on the input.
// Currently only metadata is supported. We might want to expand
// this to arbitrary non-special local projections in the future.
type projectAs objectProjection
// ApplyToFor applies this configuration to the given ForInput options.
func (p projectAs) ApplyToFor(opts *ForInput) {
opts.objectProjection = objectProjection(p)
}
// ApplyToOwns applies this configuration to the given OwnsInput options.
func (p projectAs) ApplyToOwns(opts *OwnsInput) {
opts.objectProjection = objectProjection(p)
}
// ApplyToWatches applies this configuration to the given WatchesInput options.
func (p projectAs) ApplyToWatches(opts *WatchesInput) {
opts.objectProjection = objectProjection(p)
}
var (
// OnlyMetadata tells the controller to *only* cache metadata, and to watch
// the API server in metadata-only form. This is useful when watching
// lots of objects, really big objects, or objects for which you only know
// the GVK, but not the structure. You'll need to pass
// metav1.PartialObjectMetadata to the client when fetching objects in your
// reconciler, otherwise you'll end up with a duplicate structured or
// unstructured cache.
//
// When watching a resource with OnlyMetadata, for example the v1.Pod, you
// should not Get and List using the v1.Pod type. Instead, you should use
// the special metav1.PartialObjectMetadata type.
//
// ❌ Incorrect:
//
// pod := &v1.Pod{}
// mgr.GetClient().Get(ctx, nsAndName, pod)
//
// ✅ Correct:
//
// pod := &metav1.PartialObjectMetadata{}
// pod.SetGroupVersionKind(schema.GroupVersionKind{
// Group: "",
// Version: "v1",
// Kind: "Pod",
// })
// mgr.GetClient().Get(ctx, nsAndName, pod)
//
// In the first case, controller-runtime will create another cache for the
// concrete type on top of the metadata cache; this increases memory
// consumption and leads to race conditions as caches are not in sync.
OnlyMetadata = projectAs(projectAsMetadata)
_ ForOption = OnlyMetadata
_ OwnsOption = OnlyMetadata
_ WatchesOption = OnlyMetadata
)
// }}}
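
A brief sketch (not part of this commit) of passing these options to the builder; the Deployment/ConfigMap kinds and the GenerationChangedPredicate choice are illustrative assumptions.

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func setupWithOptions(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Only reconcile Deployments when their generation changes.
		For(&appsv1.Deployment{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
		// Cache and watch owned ConfigMaps in metadata-only form.
		Owns(&corev1.ConfigMap{}, builder.OnlyMetadata).
		Complete(r)
}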


@@ -0,0 +1,216 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder
import (
"errors"
"net/http"
"net/url"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
)
// WebhookBuilder builds a Webhook.
type WebhookBuilder struct {
apiType runtime.Object
withDefaulter admission.CustomDefaulter
withValidator admission.CustomValidator
gvk schema.GroupVersionKind
mgr manager.Manager
config *rest.Config
recoverPanic bool
}
// WebhookManagedBy returns a new webhook builder that will be managed by the provided manager.Manager.
func WebhookManagedBy(m manager.Manager) *WebhookBuilder {
return &WebhookBuilder{mgr: m}
}
// TODO(droot): update the GoDoc for conversion.
// For takes a runtime.Object which should be a CR.
// If the given object implements the admission.Defaulter interface, a MutatingWebhook will be wired for this type.
// If the given object implements the admission.Validator interface, a ValidatingWebhook will be wired for this type.
func (blder *WebhookBuilder) For(apiType runtime.Object) *WebhookBuilder {
blder.apiType = apiType
return blder
}
// WithDefaulter takes an admission.CustomDefaulter interface; a MutatingWebhook will be wired for this type.
func (blder *WebhookBuilder) WithDefaulter(defaulter admission.CustomDefaulter) *WebhookBuilder {
blder.withDefaulter = defaulter
return blder
}
// WithValidator takes an admission.CustomValidator interface; a ValidatingWebhook will be wired for this type.
func (blder *WebhookBuilder) WithValidator(validator admission.CustomValidator) *WebhookBuilder {
blder.withValidator = validator
return blder
}
// RecoverPanic indicates that panics caused by the webhook should be recovered.
func (blder *WebhookBuilder) RecoverPanic() *WebhookBuilder {
blder.recoverPanic = true
return blder
}
// Complete builds the webhook.
func (blder *WebhookBuilder) Complete() error {
// Set the Config
blder.loadRestConfig()
// Set the Webhook if needed
return blder.registerWebhooks()
}
func (blder *WebhookBuilder) loadRestConfig() {
if blder.config == nil {
blder.config = blder.mgr.GetConfig()
}
}
func (blder *WebhookBuilder) registerWebhooks() error {
typ, err := blder.getType()
if err != nil {
return err
}
// Create webhook(s) for each type
blder.gvk, err = apiutil.GVKForObject(typ, blder.mgr.GetScheme())
if err != nil {
return err
}
blder.registerDefaultingWebhook()
blder.registerValidatingWebhook()
err = blder.registerConversionWebhook()
if err != nil {
return err
}
return nil
}
// registerDefaultingWebhook registers a defaulting webhook if necessary.
func (blder *WebhookBuilder) registerDefaultingWebhook() {
mwh := blder.getDefaultingWebhook()
if mwh != nil {
path := generateMutatePath(blder.gvk)
// Checking if the path is already registered.
// If so, just skip it.
if !blder.isAlreadyHandled(path) {
log.Info("Registering a mutating webhook",
"GVK", blder.gvk,
"path", path)
blder.mgr.GetWebhookServer().Register(path, mwh)
}
}
}
func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook {
if defaulter := blder.withDefaulter; defaulter != nil {
return admission.WithCustomDefaulter(blder.apiType, defaulter).WithRecoverPanic(blder.recoverPanic)
}
if defaulter, ok := blder.apiType.(admission.Defaulter); ok {
return admission.DefaultingWebhookFor(defaulter).WithRecoverPanic(blder.recoverPanic)
}
log.Info(
"skip registering a mutating webhook, object does not implement admission.Defaulter or WithDefaulter wasn't called",
"GVK", blder.gvk)
return nil
}
func (blder *WebhookBuilder) registerValidatingWebhook() {
vwh := blder.getValidatingWebhook()
if vwh != nil {
path := generateValidatePath(blder.gvk)
// Checking if the path is already registered.
// If so, just skip it.
if !blder.isAlreadyHandled(path) {
log.Info("Registering a validating webhook",
"GVK", blder.gvk,
"path", path)
blder.mgr.GetWebhookServer().Register(path, vwh)
}
}
}
func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook {
if validator := blder.withValidator; validator != nil {
return admission.WithCustomValidator(blder.apiType, validator).WithRecoverPanic(blder.recoverPanic)
}
if validator, ok := blder.apiType.(admission.Validator); ok {
return admission.ValidatingWebhookFor(validator).WithRecoverPanic(blder.recoverPanic)
}
log.Info(
"skip registering a validating webhook, object does not implement admission.Validator or WithValidator wasn't called",
"GVK", blder.gvk)
return nil
}
func (blder *WebhookBuilder) registerConversionWebhook() error {
ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType)
if err != nil {
log.Error(err, "conversion check failed", "GVK", blder.gvk)
return err
}
if ok {
if !blder.isAlreadyHandled("/convert") {
blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{})
}
log.Info("Conversion webhook enabled", "GVK", blder.gvk)
}
return nil
}
func (blder *WebhookBuilder) getType() (runtime.Object, error) {
if blder.apiType != nil {
return blder.apiType, nil
}
return nil, errors.New("For() must be called with a valid object")
}
func (blder *WebhookBuilder) isAlreadyHandled(path string) bool {
if blder.mgr.GetWebhookServer().WebhookMux == nil {
return false
}
h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}})
if p == path && h != nil {
return true
}
return false
}
func generateMutatePath(gvk schema.GroupVersionKind) string {
return "/mutate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" +
gvk.Version + "-" + strings.ToLower(gvk.Kind)
}
func generateValidatePath(gvk schema.GroupVersionKind) string {
return "/validate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" +
gvk.Version + "-" + strings.ToLower(gvk.Kind)
}
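
A minimal sketch (not part of this commit) of the webhook builder's fluent API; the object and the CustomDefaulter/CustomValidator implementations are assumed to be supplied by the caller.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// setupWebhooks registers mutating, validating, and (if the type is
// convertible) conversion webhooks for obj on the manager's webhook server.
func setupWebhooks(mgr ctrl.Manager, obj runtime.Object, d admission.CustomDefaulter, v admission.CustomValidator) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(obj).
		WithDefaulter(d).
		WithValidator(v).
		RecoverPanic().
		Complete()
}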


@@ -0,0 +1,275 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
toolscache "k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/cache/internal"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
var log = logf.RuntimeLog.WithName("object-cache")
// Cache knows how to load Kubernetes objects, fetch informers to receive
// events for Kubernetes objects (at a low level),
// and add indices to fields on the objects stored in the cache.
type Cache interface {
// Cache acts as a client to objects stored in the cache.
client.Reader
// Cache loads informers and adds field indices.
Informers
}
// Informers knows how to create or fetch informers for different
// group-version-kinds, and add indices to those informers. It's safe to call
// GetInformer from multiple threads.
type Informers interface {
// GetInformer fetches or constructs an informer for the given object that corresponds to a single
// API kind and resource.
GetInformer(ctx context.Context, obj client.Object) (Informer, error)
// GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead
// of the underlying object.
GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error)
// Start runs all the informers known to this cache until the context is closed.
// It blocks.
Start(ctx context.Context) error
// WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache.
WaitForCacheSync(ctx context.Context) bool
// Informers knows how to add indices to the caches (informers) that it manages.
client.FieldIndexer
}
// Informer allows you to interact with the underlying informer.
type Informer interface {
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
// period. Events to a single handler are delivered sequentially, but there is no coordination
// between different handlers.
AddEventHandler(handler toolscache.ResourceEventHandler)
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
// specified resync period. Events to a single handler are delivered sequentially, but there is
// no coordination between different handlers.
AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration)
// AddIndexers adds more indexers to this store. If you call this after you already have data
// in the store, the results are undefined.
AddIndexers(indexers toolscache.Indexers) error
// HasSynced returns true if the informer's underlying store has synced.
HasSynced() bool
}
// ObjectSelector is an alias for internal.Selector.
type ObjectSelector internal.Selector
// SelectorsByObject associates a client.Object's GVK to a field/label selector.
// There is also `DefaultSelector` to set a global default (which will be overridden by
// a more specific setting here, if any).
type SelectorsByObject map[client.Object]ObjectSelector
// Options are the optional arguments for creating a new InformersMap object.
type Options struct {
// Scheme is the scheme to use for mapping objects to GroupVersionKinds
Scheme *runtime.Scheme
// Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources
Mapper meta.RESTMapper
// Resync is the base frequency the informers are resynced.
// Defaults to defaultResyncTime.
// A 10 percent jitter will be added to the Resync period between informers
// so that all informers will not send list requests simultaneously.
Resync *time.Duration
// Namespace restricts the cache's ListWatch to the desired namespace.
// Defaults to watching all namespaces.
Namespace string
// SelectorsByObject restricts the cache's ListWatch to the desired
// fields per GVK for the specified objects; the map's value must implement
// Selector [1], for example using a Set [2].
// [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector
// [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set
SelectorsByObject SelectorsByObject
// DefaultSelector will be used as selectors for all object types
// that do not have a selector in SelectorsByObject defined.
DefaultSelector ObjectSelector
// UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or
// list operations for the specified object GVKs.
// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
// otherwise you will mutate the object in the cache.
UnsafeDisableDeepCopyByObject DisableDeepCopyByObject
// TransformByObject is a map from GVKs to transformer functions which
// get applied when objects of those types are about to be committed
// to the cache.
//
// This function is called both for new objects to enter the cache,
// and for updated objects.
TransformByObject TransformByObject
// DefaultTransform is the transform used for all GVKs which do
// not have an explicit transform func set in TransformByObject
DefaultTransform toolscache.TransformFunc
}
var defaultResyncTime = 10 * time.Hour
// New initializes and returns a new Cache.
func New(config *rest.Config, opts Options) (Cache, error) {
opts, err := defaultOpts(config, opts)
if err != nil {
return nil, err
}
selectorsByGVK, err := convertToSelectorsByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme)
if err != nil {
return nil, err
}
disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme)
if err != nil {
return nil, err
}
transformByGVK, err := convertToTransformByKindAndGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme)
if err != nil {
return nil, err
}
im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK, disableDeepCopyByGVK, transformByGVK)
return &informerCache{InformersMap: im}, nil
}
// BuilderWithOptions returns a Cache constructor that will build a cache
// honoring the options argument; this is useful to specify options like
// SelectorsByObject.
// WARNING: If SelectorsByObject is specified, filtered out resources are not
// returned.
// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object
// returned from cache get/list before mutating it.
func BuilderWithOptions(options Options) NewCacheFunc {
return func(config *rest.Config, opts Options) (Cache, error) {
if options.Scheme == nil {
options.Scheme = opts.Scheme
}
if options.Mapper == nil {
options.Mapper = opts.Mapper
}
if options.Resync == nil {
options.Resync = opts.Resync
}
if options.Namespace == "" {
options.Namespace = opts.Namespace
}
if opts.Resync == nil {
opts.Resync = options.Resync
}
return New(config, options)
}
}
func defaultOpts(config *rest.Config, opts Options) (Options, error) {
// Use the default Kubernetes Scheme if unset
if opts.Scheme == nil {
opts.Scheme = scheme.Scheme
}
// Construct a new Mapper if unset
if opts.Mapper == nil {
var err error
opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config)
if err != nil {
log.WithName("setup").Error(err, "Failed to get API Group-Resources")
return opts, fmt.Errorf("could not create RESTMapper from config")
}
}
// Default the resync period to 10 hours if unset
if opts.Resync == nil {
opts.Resync = &defaultResyncTime
}
return opts, nil
}
func convertToSelectorsByGVK(selectorsByObject SelectorsByObject, defaultSelector ObjectSelector, scheme *runtime.Scheme) (internal.SelectorsByGVK, error) {
selectorsByGVK := internal.SelectorsByGVK{}
for object, selector := range selectorsByObject {
gvk, err := apiutil.GVKForObject(object, scheme)
if err != nil {
return nil, err
}
selectorsByGVK[gvk] = internal.Selector(selector)
}
selectorsByGVK[schema.GroupVersionKind{}] = internal.Selector(defaultSelector)
return selectorsByGVK, nil
}
// DisableDeepCopyByObject associates a client.Object's GVK to disable DeepCopy during get or list from cache.
type DisableDeepCopyByObject map[client.Object]bool
var _ client.Object = &ObjectAll{}
// ObjectAll is the argument to represent all object types.
type ObjectAll struct {
client.Object
}
func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) {
disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{}
for obj, disable := range disableDeepCopyByObject {
switch obj.(type) {
case ObjectAll, *ObjectAll:
disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable
default:
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return nil, err
}
disableDeepCopyByGVK[gvk] = disable
}
}
return disableDeepCopyByGVK, nil
}
// TransformByObject associates a client.Object's GVK to a transformer function
// to be applied when storing the object into the cache.
type TransformByObject map[client.Object]toolscache.TransformFunc
func convertToTransformByKindAndGVK(t TransformByObject, defaultTransform toolscache.TransformFunc, scheme *runtime.Scheme) (internal.TransformFuncByObject, error) {
result := internal.NewTransformFuncByObject()
for obj, transformation := range t {
if err := result.Set(obj, scheme, transformation); err != nil {
return nil, err
}
}
result.SetDefault(defaultTransform)
return result, nil
}
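
A small sketch (not part of this commit) of feeding these Options into a manager through BuilderWithOptions; the Node label filter is an illustrative assumption.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// newFilteredManager builds a manager whose cache only stores Nodes carrying
// the worker label; all other types use the default (unfiltered) behavior.
func newFilteredManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		NewCache: cache.BuilderWithOptions(cache.Options{
			SelectorsByObject: cache.SelectorsByObject{
				&corev1.Node{}: {Label: labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""})},
			},
		}),
	})
}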

vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go (generated, vendored, 19 additions)

@@ -0,0 +1,19 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cache provides object caches that act as caching client.Reader
// instances and help drive Kubernetes-object-based event handlers.
package cache


@@ -0,0 +1,217 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"reflect"
"strings"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/cache/internal"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
var (
_ Informers = &informerCache{}
_ client.Reader = &informerCache{}
_ Cache = &informerCache{}
)
// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started.
type ErrCacheNotStarted struct{}
func (*ErrCacheNotStarted) Error() string {
return "the cache is not started, can not read objects"
}
// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap.
type informerCache struct {
*internal.InformersMap
}
// Get implements Reader.
func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error {
gvk, err := apiutil.GVKForObject(out, ip.Scheme)
if err != nil {
return err
}
started, cache, err := ip.InformersMap.Get(ctx, gvk, out)
if err != nil {
return err
}
if !started {
return &ErrCacheNotStarted{}
}
return cache.Reader.Get(ctx, key, out)
}
// List implements Reader.
func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error {
gvk, cacheTypeObj, err := ip.objectTypeForListObject(out)
if err != nil {
return err
}
started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj)
if err != nil {
return err
}
if !started {
return &ErrCacheNotStarted{}
}
return cache.Reader.List(ctx, out, opts...)
}
// objectTypeForListObject tries to find the runtime.Object and associated GVK
// for a single object corresponding to the passed-in list type. We need them
// because they are used as cache map key.
func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) {
gvk, err := apiutil.GVKForObject(list, ip.Scheme)
if err != nil {
return nil, nil, err
}
// we need the non-list GVK, so chop off the "List" from the end of the kind
if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
_, isUnstructured := list.(*unstructured.UnstructuredList)
var cacheTypeObj runtime.Object
if isUnstructured {
u := &unstructured.Unstructured{}
u.SetGroupVersionKind(gvk)
cacheTypeObj = u
} else {
itemsPtr, err := apimeta.GetItemsPtr(list)
if err != nil {
return nil, nil, err
}
// http://knowyourmeme.com/memes/this-is-fine
elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem()
if elemType.Kind() != reflect.Ptr {
elemType = reflect.PtrTo(elemType)
}
cacheTypeValue := reflect.Zero(elemType)
var ok bool
cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object)
if !ok {
return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface())
}
}
return &gvk, cacheTypeObj, nil
}
// GetInformerForKind returns the informer for the GroupVersionKind.
func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
// Map the gvk to an object
obj, err := ip.Scheme.New(gvk)
if err != nil {
return nil, err
}
_, i, err := ip.InformersMap.Get(ctx, gvk, obj)
if err != nil {
return nil, err
}
return i.Informer, err
}
// GetInformer returns the informer for the obj.
func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) {
gvk, err := apiutil.GVKForObject(obj, ip.Scheme)
if err != nil {
return nil, err
}
_, i, err := ip.InformersMap.Get(ctx, gvk, obj)
if err != nil {
return nil, err
}
return i.Informer, err
}
// NeedLeaderElection implements the LeaderElectionRunnable interface
// to indicate that this can be started without requiring the leader lock.
func (ip *informerCache) NeedLeaderElection() bool {
return false
}
// IndexField adds an indexer to the underlying cache, using extraction function to get
// value(s) from the given field. This index can then be used by passing a field selector
// to List. For one-to-one compatibility with "normal" field selectors, only return one value.
// The values may be anything. They will automatically be prefixed with the namespace of the
// given object, if present. The objects passed are guaranteed to be objects of the correct type.
func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
informer, err := ip.GetInformer(ctx, obj)
if err != nil {
return err
}
return indexByField(informer, field, extractValue)
}
func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error {
indexFunc := func(objRaw interface{}) ([]string, error) {
// TODO(directxman12): check if this is the correct type?
obj, isObj := objRaw.(client.Object)
if !isObj {
return nil, fmt.Errorf("object of type %T is not an Object", objRaw)
}
meta, err := apimeta.Accessor(obj)
if err != nil {
return nil, err
}
ns := meta.GetNamespace()
rawVals := extractor(obj)
var vals []string
if ns == "" {
// if we're not doubling the keys for the namespaced case, just create a new slice with same length
vals = make([]string, len(rawVals))
} else {
// if we need to add non-namespaced versions too, double the length
vals = make([]string, len(rawVals)*2)
}
for i, rawVal := range rawVals {
// save a namespaced variant, so that we can ask
// "what are all the object matching a given index *in a given namespace*"
vals[i] = internal.KeyToNamespacedKey(ns, rawVal)
if ns != "" {
// if we have a namespace, also inject a special index key for listing
// regardless of the object namespace
vals[i+len(rawVals)] = internal.KeyToNamespacedKey("", rawVal)
}
}
return vals, nil
}
return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc})
}
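
A short sketch (not part of this commit) of how IndexField and the resulting index are typically used; the "spec.nodeName" index on Pods is an illustrative assumption.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// indexPodsByNode registers a field index mapping each Pod to its node name.
func indexPodsByNode(ctx context.Context, mgr ctrl.Manager) error {
	return mgr.GetFieldIndexer().IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o client.Object) []string {
		return []string{o.(*corev1.Pod).Spec.NodeName}
	})
}

// podsOnNode lists Pods from the cache using that index; only exact matches
// are supported by the cache-backed reader (see requiresExactMatch below).
func podsOnNode(ctx context.Context, c client.Client, node string) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	err := c.List(ctx, pods, client.MatchingFields{"spec.nodeName": node})
	return pods, err
}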


@@ -0,0 +1,218 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"context"
"fmt"
"reflect"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// CacheReader is a client.Reader.
var _ client.Reader = &CacheReader{}
// CacheReader wraps a cache.Indexer to implement the client.Reader interface for a single type.
type CacheReader struct {
// indexer is the underlying indexer wrapped by this cache.
indexer cache.Indexer
// groupVersionKind is the group-version-kind of the resource.
groupVersionKind schema.GroupVersionKind
// scopeName is the scope of the resource (namespaced or cluster-scoped).
scopeName apimeta.RESTScopeName
// disableDeepCopy indicates not to deep copy objects during get or list objects.
// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
// otherwise you will mutate the object in the cache.
disableDeepCopy bool
}
// Get checks the indexer for the object and writes a copy of it if found.
func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error {
if c.scopeName == apimeta.RESTScopeNameRoot {
key.Namespace = ""
}
storeKey := objectKeyToStoreKey(key)
// Lookup the object from the indexer cache
obj, exists, err := c.indexer.GetByKey(storeKey)
if err != nil {
return err
}
// Not found, return an error
if !exists {
// Resource gets transformed into Kind in the error anyway, so this is fine
return apierrors.NewNotFound(schema.GroupResource{
Group: c.groupVersionKind.Group,
Resource: c.groupVersionKind.Kind,
}, key.Name)
}
// Verify the result is a runtime.Object
if _, isObj := obj.(runtime.Object); !isObj {
// This should never happen
return fmt.Errorf("cache contained %T, which is not an Object", obj)
}
if c.disableDeepCopy {
// skip deep copy which might be unsafe
// you must DeepCopy any object before mutating it outside
} else {
// deep copy to avoid mutating cache
obj = obj.(runtime.Object).DeepCopyObject()
}
// Copy the value of the item in the cache to the returned value
// TODO(directxman12): this is a terrible hack, pls fix (we should have deepcopyinto)
outVal := reflect.ValueOf(out)
objVal := reflect.ValueOf(obj)
if !objVal.Type().AssignableTo(outVal.Type()) {
return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type())
}
reflect.Indirect(outVal).Set(reflect.Indirect(objVal))
if !c.disableDeepCopy {
out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
}
return nil
}
// List lists items out of the indexer and writes them to out.
func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error {
var objs []interface{}
var err error
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
switch {
case listOpts.FieldSelector != nil:
// TODO(directxman12): support more complicated field selectors by
// combining multiple indices, GetIndexers, etc
field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector)
if !requiresExact {
return fmt.Errorf("non-exact field matches are not supported by the cache")
}
// list all objects by the field selector. If this is namespaced and we have one, ask for the
// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
// namespace.
objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
case listOpts.Namespace != "":
objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace)
default:
objs = c.indexer.List()
}
if err != nil {
return err
}
var labelSel labels.Selector
if listOpts.LabelSelector != nil {
labelSel = listOpts.LabelSelector
}
limitSet := listOpts.Limit > 0
runtimeObjs := make([]runtime.Object, 0, len(objs))
for _, item := range objs {
// if the Limit option is set and the number of items
// listed exceeds this limit, then stop reading.
if limitSet && int64(len(runtimeObjs)) >= listOpts.Limit {
break
}
obj, isObj := item.(runtime.Object)
if !isObj {
return fmt.Errorf("cache contained %T, which is not an Object", obj)
}
meta, err := apimeta.Accessor(obj)
if err != nil {
return err
}
if labelSel != nil {
lbls := labels.Set(meta.GetLabels())
if !labelSel.Matches(lbls) {
continue
}
}
var outObj runtime.Object
if c.disableDeepCopy {
// skip deep copy which might be unsafe
// you must DeepCopy any object before mutating it outside
outObj = obj
} else {
outObj = obj.DeepCopyObject()
outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
}
runtimeObjs = append(runtimeObjs, outObj)
}
return apimeta.SetList(out, runtimeObjs)
}
// objectKeyToStoreKey converts an object key to a store key.
// It's akin to MetaNamespaceKeyFunc. It's separate from
// String to allow keeping the key format easily in sync with
// MetaNamespaceKeyFunc.
func objectKeyToStoreKey(k client.ObjectKey) string {
if k.Namespace == "" {
return k.Name
}
return k.Namespace + "/" + k.Name
}
// requiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`.
func requiresExactMatch(sel fields.Selector) (field, val string, required bool) {
reqs := sel.Requirements()
if len(reqs) != 1 {
return "", "", false
}
req := reqs[0]
if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals {
return "", "", false
}
return req.Field, req.Value, true
}
// FieldIndexName constructs the name of the index over the given field,
// for use with an indexer.
func FieldIndexName(field string) string {
return "field:" + field
}
// allNamespacesNamespace is used as the "namespace" when we want to list across all namespaces.
const allNamespacesNamespace = "__all_namespaces"
// KeyToNamespacedKey prefixes the given index key with a namespace
// for use in field selector indexes.
func KeyToNamespacedKey(ns string, baseKey string) string {
if ns != "" {
return ns + "/" + baseKey
}
return allNamespacesNamespace + "/" + baseKey
}


@@ -0,0 +1,126 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"context"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
// InformersMap creates and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
// It uses a standard parameter codec constructed based on the given generated Scheme.
type InformersMap struct {
// we abstract over the details of structured/unstructured/metadata with the specificInformerMaps
// TODO(directxman12): genericize this over different projections now that we have 3 different maps
structured *specificInformersMap
unstructured *specificInformersMap
metadata *specificInformersMap
// Scheme maps runtime.Objects to GroupVersionKinds
Scheme *runtime.Scheme
}
// NewInformersMap creates a new InformersMap that can create informers for
// structured, unstructured, and metadata-only objects.
func NewInformersMap(config *rest.Config,
scheme *runtime.Scheme,
mapper meta.RESTMapper,
resync time.Duration,
namespace string,
selectors SelectorsByGVK,
disableDeepCopy DisableDeepCopyByGVK,
transformers TransformFuncByObject,
) *InformersMap {
return &InformersMap{
structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
Scheme: scheme,
}
}
// Start calls Run on each of the informers and sets started to true. Blocks on the context.
func (m *InformersMap) Start(ctx context.Context) error {
go m.structured.Start(ctx)
go m.unstructured.Start(ctx)
go m.metadata.Start(ctx)
<-ctx.Done()
return nil
}
// WaitForCacheSync waits until all the caches have been started and synced.
func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool {
syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...)
syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...)
syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...)
if !m.structured.waitForStarted(ctx) {
return false
}
if !m.unstructured.waitForStarted(ctx) {
return false
}
if !m.metadata.waitForStarted(ctx) {
return false
}
return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...)
}
// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns
// the Informer from the map.
func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
switch obj.(type) {
case *unstructured.Unstructured:
return m.unstructured.Get(ctx, gvk, obj)
case *unstructured.UnstructuredList:
return m.unstructured.Get(ctx, gvk, obj)
case *metav1.PartialObjectMetadata:
return m.metadata.Get(ctx, gvk, obj)
case *metav1.PartialObjectMetadataList:
return m.metadata.Get(ctx, gvk, obj)
default:
return m.structured.Get(ctx, gvk, obj)
}
}
// newStructuredInformersMap creates a new InformersMap for structured objects.
func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch)
}
// newUnstructuredInformersMap creates a new InformersMap for unstructured objects.
func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch)
}
// newMetadataInformersMap creates a new InformersMap for metadata-only objects.
func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch)
}
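
A tiny sketch (not part of this commit) of how the type switch in Get is exercised from the public cache: passing a PartialObjectMetadata routes the request to the metadata informers map, while a typed object would hit the structured map.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// metadataOnlyPodInformer fetches a metadata-only informer for Pods.
func metadataOnlyPodInformer(ctx context.Context, c cache.Cache) (cache.Informer, error) {
	pod := &metav1.PartialObjectMetadata{}
	pod.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
	return c.GetInformer(ctx, pod)
}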


@@ -0,0 +1,35 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import "k8s.io/apimachinery/pkg/runtime/schema"
// GroupVersionKindAll is the argument to represent all GroupVersionKind types.
var GroupVersionKindAll = schema.GroupVersionKind{}
// DisableDeepCopyByGVK associates a GroupVersionKind to disable DeepCopy during get or list from cache.
type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool
// IsDisabled returns whether DeepCopy is disabled for the given GroupVersionKind.
func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool {
if d, ok := disableByGVK[gvk]; ok {
return d
} else if d, ok = disableByGVK[GroupVersionKindAll]; ok {
return d
}
return false
}


@@ -0,0 +1,480 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"context"
"fmt"
"math/rand"
"sync"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
// createListWatcherFunc knows how to create a ListWatcher.
type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error)
// newSpecificInformersMap returns a new specificInformersMap (like
// the generic InformersMap, except that it doesn't implement WaitForCacheSync).
func newSpecificInformersMap(config *rest.Config,
scheme *runtime.Scheme,
mapper meta.RESTMapper,
resync time.Duration,
namespace string,
selectors SelectorsByGVK,
disableDeepCopy DisableDeepCopyByGVK,
transformers TransformFuncByObject,
createListWatcher createListWatcherFunc,
) *specificInformersMap {
ip := &specificInformersMap{
config: config,
Scheme: scheme,
mapper: mapper,
informersByGVK: make(map[schema.GroupVersionKind]*MapEntry),
codecs: serializer.NewCodecFactory(scheme),
paramCodec: runtime.NewParameterCodec(scheme),
resync: resync,
startWait: make(chan struct{}),
createListWatcher: createListWatcher,
namespace: namespace,
selectors: selectors.forGVK,
disableDeepCopy: disableDeepCopy,
transformers: transformers,
}
return ip
}
// MapEntry contains the cached data for an Informer.
type MapEntry struct {
// Informer is the cached informer
Informer cache.SharedIndexInformer
// Reader wraps the Informer and implements the CacheReader interface for a single type
Reader CacheReader
}
// specificInformersMap creates and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
// It uses a standard parameter codec constructed based on the given generated Scheme.
type specificInformersMap struct {
// Scheme maps runtime.Objects to GroupVersionKinds
Scheme *runtime.Scheme
// config is used to talk to the apiserver
config *rest.Config
// mapper maps GroupVersionKinds to Resources
mapper meta.RESTMapper
// informersByGVK is the cache of informers keyed by groupVersionKind
informersByGVK map[schema.GroupVersionKind]*MapEntry
// codecs is used to create a new REST client
codecs serializer.CodecFactory
// paramCodec is used by list and watch
paramCodec runtime.ParameterCodec
// stop is the stop channel to stop informers
stop <-chan struct{}
// resync is the base frequency the informers are resynced
// a 10 percent jitter will be added to the resync period between informers
// so that all informers will not send list requests simultaneously.
resync time.Duration
// mu guards access to the map
mu sync.RWMutex
// started is true if the informers have been started
started bool
// startWait is a channel that is closed after the
// informer has been started.
startWait chan struct{}
// createListWatcher knows how to create a ListWatch,
// and allows for abstracting over the particulars of structured vs
// unstructured objects.
createListWatcher createListWatcherFunc
// namespace is the namespace that all ListWatches are restricted to
// default or empty string means all namespaces
namespace string
// selectors are the label or field selectors that will be added to the
// ListWatch ListOptions.
selectors func(gvk schema.GroupVersionKind) Selector
// disableDeepCopy indicates not to deep copy objects during get or list objects.
disableDeepCopy DisableDeepCopyByGVK
// transform funcs are applied to objects before they are committed to the cache
transformers TransformFuncByObject
}
// Start calls Run on each of the informers, sets started to true, and blocks until the context is done.
// It has no return value because it cannot fail, so it is not a Runnable directly.
func (ip *specificInformersMap) Start(ctx context.Context) {
func() {
ip.mu.Lock()
defer ip.mu.Unlock()
// Set the stop channel so it can be passed to informers that are added later
ip.stop = ctx.Done()
// Start each informer
for _, informer := range ip.informersByGVK {
go informer.Informer.Run(ctx.Done())
}
// Set started to true so we immediately start any informers added later.
ip.started = true
close(ip.startWait)
}()
<-ctx.Done()
}
func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool {
select {
case <-ip.startWait:
return true
case <-ctx.Done():
return false
}
}
// HasSyncedFuncs returns all the HasSynced functions for the informers in this map.
func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced {
ip.mu.RLock()
defer ip.mu.RUnlock()
syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK))
for _, informer := range ip.informersByGVK {
syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced)
}
return syncedFuncs
}
// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns
// the Informer from the map.
func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
// Return the informer if it is found
i, started, ok := func() (*MapEntry, bool, bool) {
ip.mu.RLock()
defer ip.mu.RUnlock()
i, ok := ip.informersByGVK[gvk]
return i, ip.started, ok
}()
if !ok {
var err error
if i, started, err = ip.addInformerToMap(gvk, obj); err != nil {
return started, nil, err
}
}
if started && !i.Informer.HasSynced() {
// Wait for it to sync before returning the Informer so that folks don't read from a stale cache.
if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) {
return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0)
}
}
return started, i, nil
}
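// Illustrative sketch of how a caller inside this package might use Get to obtain a
// started, synced informer for Pods (the surrounding wiring and the corev1 import are
// assumed, not part of this file):
//
//	started, entry, err := ip.Get(ctx, corev1.SchemeGroupVersion.WithKind("Pod"), &corev1.Pod{})
//	if err == nil && started {
//		// entry.Reader serves cached reads; entry.Informer accepts event handlers.
//	}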
func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) {
ip.mu.Lock()
defer ip.mu.Unlock()
// Check the cache to see if we already have an Informer. If we do, return the Informer.
// This is for the case where 2 routines tried to get the informer when it wasn't in the map
// so neither returned early, but the first one created it.
if i, ok := ip.informersByGVK[gvk]; ok {
return i, ip.started, nil
}
// Create a NewSharedIndexInformer and add it to the map.
var lw *cache.ListWatch
lw, err := ip.createListWatcher(gvk, ip)
if err != nil {
return nil, false, err
}
ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
})
// Check to see if there is a transformer for this gvk
if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil {
return nil, false, err
}
rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, false, err
}
i := &MapEntry{
Informer: ni,
Reader: CacheReader{
indexer: ni.GetIndexer(),
groupVersionKind: gvk,
scopeName: rm.Scope.Name(),
disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk),
},
}
ip.informersByGVK[gvk] = i
// Start the informer if needed.
// TODO(seans): write thorough tests and document what happens here - can you add indexers?
// can you add eventhandlers?
if ip.started {
go i.Informer.Run(ip.stop)
}
return i, ip.started, nil
}
// createStructuredListWatch returns a new ListWatch for structured objects that can be used to create a SharedIndexInformer.
func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
// groupVersionKind to the Resource API we will use.
mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs)
if err != nil {
return nil, err
}
listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List")
listObj, err := ip.Scheme.New(listGVK)
if err != nil {
return nil, err
}
// TODO: the functions that make use of this ListWatch should be adapted to
// pass in their own contexts instead of relying on this fixed one here.
ctx := context.TODO()
// Create a new ListWatch for the obj
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
ip.selectors(gvk).ApplyToList(&opts)
res := listObj.DeepCopyObject()
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res)
return res, err
},
// Setup the watch function
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
ip.selectors(gvk).ApplyToList(&opts)
// Watch needs to be set to true separately
opts.Watch = true
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx)
},
}, nil
}
func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
// groupVersionKind to the Resource API we will use.
mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
// If the rest configuration has a negotiated serializer passed in,
// we should remove it and use the one that the dynamic client sets for us.
cfg := rest.CopyConfig(ip.config)
cfg.NegotiatedSerializer = nil
dynamicClient, err := dynamic.NewForConfig(cfg)
if err != nil {
return nil, err
}
// TODO: the functions that make use of this ListWatch should be adapted to
// pass in their own contexts instead of relying on this fixed one here.
ctx := context.TODO()
// Create a new ListWatch for the obj
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
ip.selectors(gvk).ApplyToList(&opts)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts)
}
return dynamicClient.Resource(mapping.Resource).List(ctx, opts)
},
// Setup the watch function
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
ip.selectors(gvk).ApplyToList(&opts)
// Watch needs to be set to true separately
opts.Watch = true
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts)
}
return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts)
},
}, nil
}
func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
// groupVersionKind to the Resource API we will use.
mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
// Always clear the negotiated serializer and use the one
// set from the metadata client.
cfg := rest.CopyConfig(ip.config)
cfg.NegotiatedSerializer = nil
// grab the metadata client
client, err := metadata.NewForConfig(cfg)
if err != nil {
return nil, err
}
// TODO: the functions that make use of this ListWatch should be adapted to
// pass in their own contexts instead of relying on this fixed one here.
ctx := context.TODO()
// create the relevant listwatch
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
ip.selectors(gvk).ApplyToList(&opts)
var (
list *metav1.PartialObjectMetadataList
err error
)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts)
} else {
list, err = client.Resource(mapping.Resource).List(ctx, opts)
}
if list != nil {
for i := range list.Items {
list.Items[i].SetGroupVersionKind(gvk)
}
}
return list, err
},
// Setup the watch function
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
ip.selectors(gvk).ApplyToList(&opts)
// Watch needs to be set to true separately
opts.Watch = true
var (
watcher watch.Interface
err error
)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk))
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts)
} else {
watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts)
}
if watcher != nil {
watcher = newGVKFixupWatcher(gvk, watcher)
}
return watcher, err
},
}, nil
}
// newGVKFixupWatcher adds a wrapper that preserves the GVK information when
// events come in.
//
// This works around a bug where GVK information is not passed into mapping
// functions when using the OnlyMetadata option in the builder.
// This issue is most likely caused by kubernetes/kubernetes#80609.
// See kubernetes-sigs/controller-runtime#1484.
//
// This was originally implemented as a cache.ResourceEventHandler wrapper but
// that contained a data race which was resolved by setting the GVK in a watch
// wrapper, before the objects are written to the cache.
// See kubernetes-sigs/controller-runtime#1650.
//
// The original watch wrapper was found to be incompatible with
// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a
// watch.Filter which is compatible.
// See kubernetes-sigs/controller-runtime#1789.
func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface {
return watch.Filter(
watcher,
func(in watch.Event) (watch.Event, bool) {
in.Object.GetObjectKind().SetGroupVersionKind(gvk)
return in, true
},
)
}
// resyncPeriod returns a function which generates a duration each time it is
// invoked; this is so that multiple controllers don't get into lock-step and all
// hammer the apiserver with list requests simultaneously.
func resyncPeriod(resync time.Duration) func() time.Duration {
return func() time.Duration {
// the factor will fall into [0.9, 1.1)
factor := rand.Float64()/5.0 + 0.9 //nolint:gosec
return time.Duration(float64(resync.Nanoseconds()) * factor)
}
}
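// For example, with a base resync of 10 minutes the returned generator yields a duration
// in the interval [9m, 11m), so informers built from the same base period do not all
// relist at the same instant:
//
//	next := resyncPeriod(10 * time.Minute)
//	d := next() // e.g. 9m42s on one call, 10m37s on another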
// restrictNamespaceBySelector returns the global namespace restriction for all ListWatches
// when one is set (non-empty), otherwise the namespace that a ListWatch for the specific
// resource is restricted to, derived from a field selector on the metadata.namespace field.
func restrictNamespaceBySelector(namespaceOpt string, s Selector) string {
if namespaceOpt != "" {
// namespace is already restricted
return namespaceOpt
}
fieldSelector := s.Field
if fieldSelector == nil || fieldSelector.Empty() {
return ""
}
// check whether a selector includes the namespace field
value, found := fieldSelector.RequiresExactMatch("metadata.namespace")
if found {
return value
}
return ""
}
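// Illustrative sketch of the precedence implemented above (the selector values are
// hypothetical, and the fields import is assumed):
//
//	s := Selector{Field: fields.OneTermEqualSelector("metadata.namespace", "team-a")}
//	restrictNamespaceBySelector("", s)    // "team-a": derived from the field selector
//	restrictNamespaceBySelector("ops", s) // "ops": an explicit namespace option wins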

View File

@@ -0,0 +1,54 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SelectorsByGVK associates a GroupVersionKind with a field/label selector.
type SelectorsByGVK map[schema.GroupVersionKind]Selector
func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector {
if specific, found := s[gvk]; found {
return specific
}
if defaultSelector, found := s[schema.GroupVersionKind{}]; found {
return defaultSelector
}
return Selector{}
}
// Selector specifies the label/field selectors used to fill in ListOptions.
type Selector struct {
Label labels.Selector
Field fields.Selector
}
// ApplyToList fills in the ListOptions LabelSelector and FieldSelector if needed.
func (s Selector) ApplyToList(listOpts *metav1.ListOptions) {
if s.Label != nil {
listOpts.LabelSelector = s.Label.String()
}
if s.Field != nil {
listOpts.FieldSelector = s.Field.String()
}
}
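// Illustrative sketch of how a Selector translates into ListOptions (the selector
// values are hypothetical):
//
//	sel := Selector{
//		Label: labels.SelectorFromSet(labels.Set{"app": "demo"}),
//		Field: fields.OneTermEqualSelector("metadata.namespace", "team-a"),
//	}
//	opts := metav1.ListOptions{}
//	sel.ApplyToList(&opts)
//	// opts.LabelSelector == "app=demo"
//	// opts.FieldSelector == "metadata.namespace=team-a"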

View File

@@ -0,0 +1,50 @@
package internal
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// TransformFuncByObject provides access to the correct transform function for
// any given GVK.
type TransformFuncByObject interface {
Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error
Get(schema.GroupVersionKind) cache.TransformFunc
SetDefault(transformer cache.TransformFunc)
}
type transformFuncByGVK struct {
defaultTransform cache.TransformFunc
transformers map[schema.GroupVersionKind]cache.TransformFunc
}
// NewTransformFuncByObject creates a new TransformFuncByObject instance.
func NewTransformFuncByObject() TransformFuncByObject {
return &transformFuncByGVK{
transformers: make(map[schema.GroupVersionKind]cache.TransformFunc),
defaultTransform: nil,
}
}
func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) {
t.defaultTransform = transformer
}
func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error {
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return err
}
t.transformers[gvk] = transformer
return nil
}
func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc {
if val, ok := t.transformers[gvk]; ok {
return val
}
return t.defaultTransform
}
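// Illustrative sketch of registering a per-type transform that strips managed fields
// before objects are cached (the corev1/metav1 imports and the scheme are assumed; the
// scheme must recognize the type passed to Set):
//
//	t := NewTransformFuncByObject()
//	_ = t.Set(&corev1.Pod{}, scheme, func(i interface{}) (interface{}, error) {
//		if obj, ok := i.(metav1.Object); ok {
//			obj.SetManagedFields(nil)
//		}
//		return i, nil
//	})
//	transform := t.Get(corev1.SchemeGroupVersion.WithKind("Pod")) // returns the func above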

View File

@@ -0,0 +1,331 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
toolscache "k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
)
// NewCacheFunc - Function for creating a new cache from the options and a rest config.
type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error)
// globalCache is the key for the dedicated cache that handles cluster-scoped resources.
const globalCache = "_cluster-scope"
// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache.
// This will scope the cache to a list of namespaces. Listing for all namespaces
// will list for all the namespaces that this knows about. By default this will create
// a global cache for cluster-scoped resources. Note that this is not intended
// to be used for excluding namespaces; that is better done via a Predicate. Also note that
// you may face performance issues when using this with a high number of namespaces.
func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
return func(config *rest.Config, opts Options) (Cache, error) {
opts, err := defaultOpts(config, opts)
if err != nil {
return nil, err
}
caches := map[string]Cache{}
// create a cache for cluster scoped resources
gCache, err := New(config, opts)
if err != nil {
return nil, fmt.Errorf("error creating global cache: %w", err)
}
for _, ns := range namespaces {
opts.Namespace = ns
c, err := New(config, opts)
if err != nil {
return nil, err
}
caches[ns] = c
}
return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil
}
}
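// Illustrative sketch of wiring the builder into a manager so the cache only watches two
// namespaces (the namespace names are hypothetical, and ctrl is assumed to be the
// sigs.k8s.io/controller-runtime package):
//
//	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
//		NewCache: cache.MultiNamespacedCacheBuilder([]string{"team-a", "team-b"}),
//	})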
// multiNamespaceCache knows how to handle multiple namespaced caches.
// Use this feature when scoping permissions for your
// operator to a list of namespaces instead of watching every namespace
// in the cluster.
type multiNamespaceCache struct {
namespaceToCache map[string]Cache
Scheme *runtime.Scheme
RESTMapper apimeta.RESTMapper
clusterCache Cache
}
var _ Cache = &multiNamespaceCache{}
// Methods for multiNamespaceCache to conform to the Informers interface.
func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) {
informers := map[string]Informer{}
// If the object is cluster-scoped, get the informer from clusterCache,
// if not use the namespaced caches.
isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
if err != nil {
return nil, err
}
if !isNamespaced {
clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj)
if err != nil {
return nil, err
}
informers[globalCache] = clusterCacheInf
return &multiNamespaceInformer{namespaceToInformer: informers}, nil
}
for ns, cache := range c.namespaceToCache {
informer, err := cache.GetInformer(ctx, obj)
if err != nil {
return nil, err
}
informers[ns] = informer
}
return &multiNamespaceInformer{namespaceToInformer: informers}, nil
}
func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
informers := map[string]Informer{}
// If the object is cluster-scoped, get the informer from clusterCache,
// if not use the namespaced caches.
isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper)
if err != nil {
return nil, err
}
if !isNamespaced {
clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk)
if err != nil {
return nil, err
}
informers[globalCache] = clusterCacheInf
return &multiNamespaceInformer{namespaceToInformer: informers}, nil
}
for ns, cache := range c.namespaceToCache {
informer, err := cache.GetInformerForKind(ctx, gvk)
if err != nil {
return nil, err
}
informers[ns] = informer
}
return &multiNamespaceInformer{namespaceToInformer: informers}, nil
}
func (c *multiNamespaceCache) Start(ctx context.Context) error {
// start global cache
go func() {
err := c.clusterCache.Start(ctx)
if err != nil {
log.Error(err, "cluster scoped cache failed to start")
}
}()
// start namespaced caches
for ns, cache := range c.namespaceToCache {
go func(ns string, cache Cache) {
err := cache.Start(ctx)
if err != nil {
log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns)
}
}(ns, cache)
}
<-ctx.Done()
return nil
}
func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool {
synced := true
for _, cache := range c.namespaceToCache {
if s := cache.WaitForCacheSync(ctx); !s {
synced = s
}
}
// check if cluster scoped cache has synced
if !c.clusterCache.WaitForCacheSync(ctx) {
synced = false
}
return synced
}
func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
if err != nil {
return nil //nolint:nilerr
}
if !isNamespaced {
return c.clusterCache.IndexField(ctx, obj, field, extractValue)
}
for _, cache := range c.namespaceToCache {
if err := cache.IndexField(ctx, obj, field, extractValue); err != nil {
return err
}
}
return nil
}
func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
if err != nil {
return err
}
if !isNamespaced {
// Look into the global cache to fetch the object
return c.clusterCache.Get(ctx, key, obj)
}
cache, ok := c.namespaceToCache[key.Namespace]
if !ok {
return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key)
}
return cache.Get(ctx, key, obj)
}
// List, on the multi-namespace cache, returns the objects from all namespaces that the cache is watching when asked for all namespaces.
func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper)
if err != nil {
return err
}
if !isNamespaced {
// Look at the global cache to get the objects with the specified GVK
return c.clusterCache.List(ctx, list, opts...)
}
if listOpts.Namespace != corev1.NamespaceAll {
cache, ok := c.namespaceToCache[listOpts.Namespace]
if !ok {
return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace)
}
return cache.List(ctx, list, opts...)
}
listAccessor, err := apimeta.ListAccessor(list)
if err != nil {
return err
}
allItems, err := apimeta.ExtractList(list)
if err != nil {
return err
}
limitSet := listOpts.Limit > 0
var resourceVersion string
for _, cache := range c.namespaceToCache {
listObj := list.DeepCopyObject().(client.ObjectList)
err = cache.List(ctx, listObj, &listOpts)
if err != nil {
return err
}
items, err := apimeta.ExtractList(listObj)
if err != nil {
return err
}
accessor, err := apimeta.ListAccessor(listObj)
if err != nil {
return fmt.Errorf("object: %T must be a list type", list)
}
allItems = append(allItems, items...)
// The last list call should have the most correct resource version.
resourceVersion = accessor.GetResourceVersion()
if limitSet {
// decrement Limit by the number of items
// fetched from the current namespace.
listOpts.Limit -= int64(len(items))
// if a Limit was set and the number of
// items read has reached this set limit,
// then stop reading.
if listOpts.Limit == 0 {
break
}
}
}
listAccessor.SetResourceVersion(resourceVersion)
return apimeta.SetList(list, allItems)
}
// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces.
type multiNamespaceInformer struct {
namespaceToInformer map[string]Informer
}
var _ Informer = &multiNamespaceInformer{}
// AddEventHandler adds the handler to each namespaced informer.
func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) {
for _, informer := range i.namespaceToInformer {
informer.AddEventHandler(handler)
}
}
// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer.
func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) {
for _, informer := range i.namespaceToInformer {
informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod)
}
}
// AddIndexers adds the indexer for each namespaced informer.
func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error {
for _, informer := range i.namespaceToInformer {
err := informer.AddIndexers(indexers)
if err != nil {
return err
}
}
return nil
}
// HasSynced checks if each namespaced informer has synced.
func (i *multiNamespaceInformer) HasSynced() bool {
for _, informer := range i.namespaceToInformer {
if ok := informer.HasSynced(); !ok {
return ok
}
}
return true
}

View File

@@ -0,0 +1,166 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certwatcher
import (
"context"
"crypto/tls"
"sync"
"github.com/fsnotify/fsnotify"
"sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
var log = logf.RuntimeLog.WithName("certwatcher")
// CertWatcher watches certificate and key files for changes. When either file
// changes, it reads and parses both and calls an optional callback with the new
// certificate.
type CertWatcher struct {
sync.RWMutex
currentCert *tls.Certificate
watcher *fsnotify.Watcher
certPath string
keyPath string
}
// New returns a new CertWatcher watching the given certificate and key.
func New(certPath, keyPath string) (*CertWatcher, error) {
var err error
cw := &CertWatcher{
certPath: certPath,
keyPath: keyPath,
}
// Initial read of certificate and key.
if err := cw.ReadCertificate(); err != nil {
return nil, err
}
cw.watcher, err = fsnotify.NewWatcher()
if err != nil {
return nil, err
}
return cw, nil
}
// GetCertificate fetches the currently loaded certificate, which may be nil.
func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
cw.RLock()
defer cw.RUnlock()
return cw.currentCert, nil
}
// Start starts the watch on the certificate and key files.
func (cw *CertWatcher) Start(ctx context.Context) error {
files := []string{cw.certPath, cw.keyPath}
for _, f := range files {
if err := cw.watcher.Add(f); err != nil {
return err
}
}
go cw.Watch()
log.Info("Starting certificate watcher")
// Block until the context is done.
<-ctx.Done()
return cw.watcher.Close()
}
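// Illustrative sketch of serving TLS with certificates that reload on change (the file
// paths and server wiring are hypothetical; inside a manager the watcher can instead be
// registered via mgr.Add so the manager drives Start):
//
//	cw, err := certwatcher.New("/path/to/tls.crt", "/path/to/tls.key")
//	if err != nil { /* handle error */ }
//	go func() { _ = cw.Start(ctx) }()
//	srv := &http.Server{
//		Addr:      ":8443",
//		TLSConfig: &tls.Config{GetCertificate: cw.GetCertificate},
//	}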
// Watch reads events from the watcher's channel and reacts to changes.
func (cw *CertWatcher) Watch() {
for {
select {
case event, ok := <-cw.watcher.Events:
// Channel is closed.
if !ok {
return
}
cw.handleEvent(event)
case err, ok := <-cw.watcher.Errors:
// Channel is closed.
if !ok {
return
}
log.Error(err, "certificate watch error")
}
}
}
// ReadCertificate reads the certificate and key files from disk, parses them,
// and updates the current certificate on the watcher. If a callback is set, it
// is invoked with the new certificate.
func (cw *CertWatcher) ReadCertificate() error {
metrics.ReadCertificateTotal.Inc()
cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath)
if err != nil {
metrics.ReadCertificateErrors.Inc()
return err
}
cw.Lock()
cw.currentCert = &cert
cw.Unlock()
log.Info("Updated current TLS certificate")
return nil
}
func (cw *CertWatcher) handleEvent(event fsnotify.Event) {
// Only care about events which may modify the contents of the file.
if !(isWrite(event) || isRemove(event) || isCreate(event)) {
return
}
log.V(1).Info("certificate event", "event", event)
// If the file was removed, re-add the watch.
if isRemove(event) {
if err := cw.watcher.Add(event.Name); err != nil {
log.Error(err, "error re-watching file")
}
}
if err := cw.ReadCertificate(); err != nil {
log.Error(err, "error re-reading certificate")
}
}
func isWrite(event fsnotify.Event) bool {
return event.Op&fsnotify.Write == fsnotify.Write
}
func isCreate(event fsnotify.Event) bool {
return event.Op&fsnotify.Create == fsnotify.Create
}
func isRemove(event fsnotify.Event) bool {
return event.Op&fsnotify.Remove == fsnotify.Remove
}

View File

@@ -0,0 +1,23 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package certwatcher is a helper for reloading Certificates from disk to be used
with tls servers. It provides a helper func `GetCertificate` which can be
called from `tls.Config` and passed into your tls.Listener. For a detailed
example server, see pkg/webhook/server.go.
*/
package certwatcher

View File

@@ -0,0 +1,45 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
var (
// ReadCertificateTotal is a prometheus counter metric which holds the total
// number of certificate reads.
ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "certwatcher_read_certificate_total",
Help: "Total number of certificate reads",
})
// ReadCertificateErrors is a prometheus counter metric which holds the total
// number of certificate read errors.
ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "certwatcher_read_certificate_errors_total",
Help: "Total number of certificate read errors",
})
)
func init() {
metrics.Registry.MustRegister(
ReadCertificateTotal,
ReadCertificateErrors,
)
}

View File

@@ -0,0 +1,196 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package apiutil contains utilities for working with raw Kubernetes
// API machinery, such as creating RESTMappers and raw REST clients,
// and extracting the GVK of an object.
package apiutil
import (
"fmt"
"reflect"
"sync"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/discovery"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
)
var (
protobufScheme = runtime.NewScheme()
protobufSchemeLock sync.RWMutex
)
func init() {
// Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers.
// For custom resources, CRDs cannot support Protocol Buffers but aggregated APIs can.
// See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
if err := clientgoscheme.AddToScheme(protobufScheme); err != nil {
panic(err)
}
}
// AddToProtobufScheme adds the given SchemeBuilder into protobufScheme, which should
// be additional types that do support protobuf.
func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error {
protobufSchemeLock.Lock()
defer protobufSchemeLock.Unlock()
return addToScheme(protobufScheme)
}
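// Illustrative sketch, assuming an aggregated API whose types support protobuf (the
// metrics API and its import path are used here purely as an example):
//
//	import metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
//
//	if err := apiutil.AddToProtobufScheme(metricsv1beta1.AddToScheme); err != nil {
//		// handle error
//	}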
// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery
// information fetched by a new client with the given config.
func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) {
// Get a mapper
dc, err := discovery.NewDiscoveryClientForConfig(c)
if err != nil {
return nil, err
}
gr, err := restmapper.GetAPIGroupResources(dc)
if err != nil {
return nil, err
}
return restmapper.NewDiscoveryRESTMapper(gr), nil
}
// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK.
func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
// TODO(directxman12): do we want to generalize this to arbitrary container types?
// I think we'd need a generalized form of scheme or something. It's a
// shame there's not a reliable "GetGVK" interface that works by default
// for unpopulated static types and populated "dynamic" types
// (unstructured, partial, etc)
// check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds
_, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort
_, isPartialList := obj.(*metav1.PartialObjectMetadataList)
if isPartial || isPartialList {
// we require that the GVK be populated in order to recognize the object
gvk := obj.GetObjectKind().GroupVersionKind()
if len(gvk.Kind) == 0 {
return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind")
}
if len(gvk.Version) == 0 {
return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version")
}
return gvk, nil
}
gvks, isUnversioned, err := scheme.ObjectKinds(obj)
if err != nil {
return schema.GroupVersionKind{}, err
}
if isUnversioned {
return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj)
}
if len(gvks) < 1 {
return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj)
}
if len(gvks) > 1 {
// this should only trigger for things like metav1.XYZ --
// normal versioned types should be fine
return schema.GroupVersionKind{}, fmt.Errorf(
"multiple group-version-kinds associated with type %T, refusing to guess at one", obj)
}
return gvks[0], nil
}
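// Illustrative usage sketch with the client-go scheme (the appsv1 import is assumed):
//
//	scheme := runtime.NewScheme()
//	_ = clientgoscheme.AddToScheme(scheme)
//	gvk, err := GVKForObject(&appsv1.Deployment{}, scheme)
//	// gvk is {Group: "apps", Version: "v1", Kind: "Deployment"} when err is nil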
// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated
// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from
// baseConfig, if set, otherwise a default serializer will be set.
func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) {
return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs))
}
// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory
// in order to avoid clearing the GVK from the decoded object.
//
// See https://github.com/kubernetes/kubernetes/issues/80609.
type serializerWithDecodedGVK struct {
serializer.WithoutConversionCodecFactory
}
// DecoderToVersion returns a decoder that does not do conversion.
func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
return serializer
}
// createRestConfig copies the base config and updates needed fields for a new rest config.
func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config {
gv := gvk.GroupVersion()
cfg := rest.CopyConfig(baseConfig)
cfg.GroupVersion = &gv
if gvk.Group == "" {
cfg.APIPath = "/api"
} else {
cfg.APIPath = "/apis"
}
if cfg.UserAgent == "" {
cfg.UserAgent = rest.DefaultKubernetesUserAgent()
}
// TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true.
if cfg.ContentType == "" && !isUnstructured {
protobufSchemeLock.RLock()
if protobufScheme.Recognizes(gvk) {
cfg.ContentType = runtime.ContentTypeProtobuf
}
protobufSchemeLock.RUnlock()
}
if isUnstructured {
// If the object is unstructured, we need to preserve the GVK information.
// Use our own custom serializer.
cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
} else {
cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
}
return cfg
}
type serializerWithTargetZeroingDecode struct {
runtime.NegotiatedSerializer
}
func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder {
return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)}
}
type targetZeroingDecoder struct {
upstream runtime.Decoder
}
func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
zero(into)
return t.upstream.Decode(data, defaults, into)
}
// zero zeros the value of a pointer.
func zero(x interface{}) {
if x == nil {
return
}
res := reflect.ValueOf(x).Elem()
res.Set(reflect.Zero(res.Type()))
}

View File

@@ -0,0 +1,290 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiutil
import (
"sync"
"sync/atomic"
"golang.org/x/time/rate"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
)
// dynamicRESTMapper is a RESTMapper that dynamically discovers resource
// types at runtime.
type dynamicRESTMapper struct {
mu sync.RWMutex // protects the following fields
staticMapper meta.RESTMapper
limiter *rate.Limiter
newMapper func() (meta.RESTMapper, error)
lazy bool
// Used for lazy init.
inited uint32
initMtx sync.Mutex
}
// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper.
type DynamicRESTMapperOption func(*dynamicRESTMapper) error
// WithLimiter sets the RESTMapper's underlying limiter to lim.
func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption {
return func(drm *dynamicRESTMapper) error {
drm.limiter = lim
return nil
}
}
// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings
// until an API call is made.
var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error {
drm.lazy = true
return nil
}
// WithCustomMapper supports setting a custom RESTMapper refresher instead of
// the default method, which uses a discovery client.
//
// This exists mainly for testing, but can be useful if you need tighter control
// over how discovery is performed, which discovery endpoints are queried, etc.
func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption {
return func(drm *dynamicRESTMapper) error {
drm.newMapper = newMapper
return nil
}
}
// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic
// RESTMapper dynamically discovers resource types at runtime. opts
// configure the RESTMapper.
func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) {
client, err := discovery.NewDiscoveryClientForConfig(cfg)
if err != nil {
return nil, err
}
drm := &dynamicRESTMapper{
limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize),
newMapper: func() (meta.RESTMapper, error) {
groupResources, err := restmapper.GetAPIGroupResources(client)
if err != nil {
return nil, err
}
return restmapper.NewDiscoveryRESTMapper(groupResources), nil
},
}
for _, opt := range opts {
if err = opt(drm); err != nil {
return nil, err
}
}
if !drm.lazy {
if err := drm.setStaticMapper(); err != nil {
return nil, err
}
}
return drm, nil
}
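// Illustrative sketch of a lazily-initialized mapper with a tighter reload rate limit
// (the cfg variable and the limiter values are hypothetical):
//
//	mapper, err := NewDynamicRESTMapper(cfg,
//		WithLazyDiscovery,
//		WithLimiter(rate.NewLimiter(rate.Limit(1), 2)),
//	)
//	if err == nil {
//		mapping, _ := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
//		_ = mapping
//	}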
var (
// defaultRefillRate is the default rate at which potential calls are
// added back to the "bucket" of allowed calls.
defaultRefillRate = 5
// defaultLimitSize is the default starting/max number of potential calls
// per second. Once a call is used, it's added back to the bucket at a rate
// of defaultRefillRate per second.
defaultLimitSize = 5
)
// setStaticMapper sets drm's staticMapper by querying its client, regardless
// of reload backoff.
func (drm *dynamicRESTMapper) setStaticMapper() error {
newMapper, err := drm.newMapper()
if err != nil {
return err
}
drm.staticMapper = newMapper
return nil
}
// init initializes drm only once if drm is lazy.
func (drm *dynamicRESTMapper) init() (err error) {
// skip init if drm is not lazy or has initialized
if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 {
return nil
}
drm.initMtx.Lock()
defer drm.initMtx.Unlock()
if drm.inited == 0 {
if err = drm.setStaticMapper(); err == nil {
atomic.StoreUint32(&drm.inited, 1)
}
}
return err
}
// checkAndReload attempts to call the given callback, which is assumed to be dependent
// on the data in the restmapper.
//
// If the callback returns an error matching meta.IsNoMatchError, it will attempt to reload
// the RESTMapper's data and re-call the callback once that's occurred.
// If the callback returns any other error, the function will return immediately regardless.
//
// It will take care of ensuring that reloads are rate-limited and that extraneous calls
// aren't made. If a reload would exceed the limiter's rate, it returns the error returned by
// the callback.
// It's thread-safe, and worries about thread-safety for the callback (so the callback does
// not need to attempt to lock the restmapper).
func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error {
// first, check the common path -- data is fresh enough
// (use an IIFE for the lock's defer)
err := func() error {
drm.mu.RLock()
defer drm.mu.RUnlock()
return checkNeedsReload()
}()
needsReload := meta.IsNoMatchError(err)
if !needsReload {
return err
}
// if the data wasn't fresh, we'll need to try and update it, so grab the lock...
drm.mu.Lock()
defer drm.mu.Unlock()
// ... and double-check that we didn't reload in the meantime
err = checkNeedsReload()
needsReload = meta.IsNoMatchError(err)
if !needsReload {
return err
}
// we're still stale, so grab a rate-limit token if we can...
if !drm.limiter.Allow() {
// return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter)
// so that clients can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError
return err
}
// ...reload...
if err := drm.setStaticMapper(); err != nil {
return err
}
// ...and return the results of the closure regardless
return checkNeedsReload()
}
// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors.
func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
if err := drm.init(); err != nil {
return schema.GroupVersionKind{}, err
}
var gvk schema.GroupVersionKind
err := drm.checkAndReload(func() error {
var err error
gvk, err = drm.staticMapper.KindFor(resource)
return err
})
return gvk, err
}
func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
if err := drm.init(); err != nil {
return nil, err
}
var gvks []schema.GroupVersionKind
err := drm.checkAndReload(func() error {
var err error
gvks, err = drm.staticMapper.KindsFor(resource)
return err
})
return gvks, err
}
func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
if err := drm.init(); err != nil {
return schema.GroupVersionResource{}, err
}
var gvr schema.GroupVersionResource
err := drm.checkAndReload(func() error {
var err error
gvr, err = drm.staticMapper.ResourceFor(input)
return err
})
return gvr, err
}
func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
if err := drm.init(); err != nil {
return nil, err
}
var gvrs []schema.GroupVersionResource
err := drm.checkAndReload(func() error {
var err error
gvrs, err = drm.staticMapper.ResourcesFor(input)
return err
})
return gvrs, err
}
func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
if err := drm.init(); err != nil {
return nil, err
}
var mapping *meta.RESTMapping
err := drm.checkAndReload(func() error {
var err error
mapping, err = drm.staticMapper.RESTMapping(gk, versions...)
return err
})
return mapping, err
}
func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
if err := drm.init(); err != nil {
return nil, err
}
var mappings []*meta.RESTMapping
err := drm.checkAndReload(func() error {
var err error
mappings, err = drm.staticMapper.RESTMappings(gk, versions...)
return err
})
return mappings, err
}
func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) {
if err := drm.init(); err != nil {
return "", err
}
var singular string
err := drm.checkAndReload(func() error {
var err error
singular, err = drm.staticMapper.ResourceSingularizer(resource)
return err
})
return singular, err
}

View File

@@ -0,0 +1,327 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// WarningHandlerOptions are options for configuring a
// warning handler for the client which is responsible
// for surfacing API Server warnings.
type WarningHandlerOptions struct {
// SuppressWarnings decides if the warnings from the
// API server are suppressed or surfaced in the client.
SuppressWarnings bool
// AllowDuplicateLogs, if true, disables deduplication of the
// surfaced warning messages before they are logged. See
// log.KubeAPIWarningLoggerOptions for considerations
// regarding deduplication.
AllowDuplicateLogs bool
}
// Options are creation options for a Client.
type Options struct {
// Scheme, if provided, will be used to map go structs to GroupVersionKinds
Scheme *runtime.Scheme
// Mapper, if provided, will be used to map GroupVersionKinds to Resources
Mapper meta.RESTMapper
// Opts is used to configure the warning handler responsible for
// surfacing and handling warning messages sent by the API server.
Opts WarningHandlerOptions
}
// New returns a new Client using the provided config and Options.
// The returned client reads *and* writes directly from the server
// (it doesn't use object caches). It understands how to work with
// normal types (both custom resources and aggregated/built-in resources),
// as well as unstructured types.
//
// In the case of normal types, the scheme will be used to look up the
// corresponding group, version, and kind for the given type. In the
// case of unstructured types, the group, version, and kind will be extracted
// from the corresponding fields on the object.
func New(config *rest.Config, options Options) (Client, error) {
return newClient(config, options)
}
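// Illustrative usage sketch (cfg is assumed to be a *rest.Config obtained elsewhere, and
// the corev1 import, namespace, and object name are hypothetical):
//
//	c, err := New(cfg, Options{})
//	if err != nil { /* handle error */ }
//	pod := &corev1.Pod{}
//	err = c.Get(ctx, ObjectKey{Namespace: "default", Name: "example"}, pod)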
func newClient(config *rest.Config, options Options) (*client, error) {
if config == nil {
return nil, fmt.Errorf("must provide non-nil rest.Config to client.New")
}
if !options.Opts.SuppressWarnings {
// surface warnings
logger := log.Log.WithName("KubeAPIWarningLogger")
// Set a WarningHandler, the default WarningHandler
// is log.KubeAPIWarningLogger with deduplication enabled.
// See log.KubeAPIWarningLoggerOptions for considerations
// regarding deduplication.
config = rest.CopyConfig(config)
config.WarningHandler = log.NewKubeAPIWarningLogger(
logger,
log.KubeAPIWarningLoggerOptions{
Deduplicate: !options.Opts.AllowDuplicateLogs,
},
)
}
// Init a scheme if none provided
if options.Scheme == nil {
options.Scheme = scheme.Scheme
}
// Init a Mapper if none provided
if options.Mapper == nil {
var err error
options.Mapper, err = apiutil.NewDynamicRESTMapper(config)
if err != nil {
return nil, err
}
}
clientcache := &clientCache{
config: config,
scheme: options.Scheme,
mapper: options.Mapper,
codecs: serializer.NewCodecFactory(options.Scheme),
structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
}
rawMetaClient, err := metadata.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err)
}
c := &client{
typedClient: typedClient{
cache: clientcache,
paramCodec: runtime.NewParameterCodec(options.Scheme),
},
unstructuredClient: unstructuredClient{
cache: clientcache,
paramCodec: noConversionParamCodec{},
},
metadataClient: metadataClient{
client: rawMetaClient,
restMapper: options.Mapper,
},
scheme: options.Scheme,
mapper: options.Mapper,
}
return c, nil
}
var _ Client = &client{}
// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
// new clients at the time they are used, and caches the client.
type client struct {
typedClient typedClient
unstructuredClient unstructuredClient
metadataClient metadataClient
scheme *runtime.Scheme
mapper meta.RESTMapper
}
// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object.
func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) {
if gvk != schema.EmptyObjectKind.GroupVersionKind() {
if v, ok := obj.(schema.ObjectKind); ok {
v.SetGroupVersionKind(gvk)
}
}
}
// Scheme returns the scheme this client is using.
func (c *client) Scheme() *runtime.Scheme {
return c.scheme
}
// RESTMapper returns the rest mapper this client is using.
func (c *client) RESTMapper() meta.RESTMapper {
return c.mapper
}
// Create implements client.Client.
func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Create(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return fmt.Errorf("cannot create using only metadata")
default:
return c.typedClient.Create(ctx, obj, opts...)
}
}
// Update implements client.Client.
func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Update(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return fmt.Errorf("cannot update using only metadata -- did you mean to patch?")
default:
return c.typedClient.Update(ctx, obj, opts...)
}
}
// Delete implements client.Client.
func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Delete(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return c.metadataClient.Delete(ctx, obj, opts...)
default:
return c.typedClient.Delete(ctx, obj, opts...)
}
}
// DeleteAllOf implements client.Client.
func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return c.metadataClient.DeleteAllOf(ctx, obj, opts...)
default:
return c.typedClient.DeleteAllOf(ctx, obj, opts...)
}
}
// Patch implements client.Client.
func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
case *metav1.PartialObjectMetadata:
return c.metadataClient.Patch(ctx, obj, patch, opts...)
default:
return c.typedClient.Patch(ctx, obj, patch, opts...)
}
}
// Get implements client.Client.
func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Get(ctx, key, obj, opts...)
case *metav1.PartialObjectMetadata:
// Metadata only object should always preserve the GVK coming in from the caller.
defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
return c.metadataClient.Get(ctx, key, obj, opts...)
default:
return c.typedClient.Get(ctx, key, obj, opts...)
}
}
// List implements client.Client.
func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
switch x := obj.(type) {
case *unstructured.UnstructuredList:
return c.unstructuredClient.List(ctx, obj, opts...)
case *metav1.PartialObjectMetadataList:
// Metadata only object should always preserve the GVK.
gvk := obj.GetObjectKind().GroupVersionKind()
defer c.resetGroupVersionKind(obj, gvk)
// Call the list client.
if err := c.metadataClient.List(ctx, obj, opts...); err != nil {
return err
}
// Restore the GVK for each item in the list.
itemGVK := schema.GroupVersionKind{
Group: gvk.Group,
Version: gvk.Version,
// TODO: this is producing unsafe guesses that don't actually work,
// but it matches ~99% of the cases out there.
Kind: strings.TrimSuffix(gvk.Kind, "List"),
}
for i := range x.Items {
item := &x.Items[i]
item.SetGroupVersionKind(itemGVK)
}
return nil
default:
return c.typedClient.List(ctx, obj, opts...)
}
}
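// Illustrative sketch of a metadata-only list, which exercises the
// PartialObjectMetadataList branch above (the namespace is hypothetical):
//
//	list := &metav1.PartialObjectMetadataList{}
//	list.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DeploymentList"})
//	err := c.List(ctx, list, InNamespace("default"))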
// Status implements client.StatusClient.
func (c *client) Status() StatusWriter {
return &statusWriter{client: c}
}
// statusWriter is a client.StatusWriter that writes to the status subresource.
type statusWriter struct {
client *client
}
// ensure statusWriter implements client.StatusWriter.
var _ StatusWriter = &statusWriter{}
// Update implements client.StatusWriter.
func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
switch obj.(type) {
case *unstructured.Unstructured:
return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
default:
return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
}
}
// Patch implements client.StatusWriter.
func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
switch obj.(type) {
case *unstructured.Unstructured:
return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...)
case *metav1.PartialObjectMetadata:
return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...)
default:
return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
}
}

View File

@@ -0,0 +1,150 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"strings"
"sync"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// clientCache creates and caches rest clients and metadata for Kubernetes types.
type clientCache struct {
// config is the rest.Config to talk to an apiserver
config *rest.Config
// scheme maps go structs to GroupVersionKinds
scheme *runtime.Scheme
// mapper maps GroupVersionKinds to Resources
mapper meta.RESTMapper
// codecs are used to create a REST client for a gvk
codecs serializer.CodecFactory
// structuredResourceByType caches structured type metadata
structuredResourceByType map[schema.GroupVersionKind]*resourceMeta
// unstructuredResourceByType caches unstructured type metadata
unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta
mu sync.RWMutex
}
// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource.
// If the object is a list, the resource represents the item's type instead.
func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) {
if strings.HasSuffix(gvk.Kind, "List") && isList {
// if this was a list, treat it as a request for the item's resource
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs)
if err != nil {
return nil, err
}
mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil
}
// getResource returns the resource meta information for the given type of object.
// If the object is a list, the resource represents the item's type instead.
func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) {
gvk, err := apiutil.GVKForObject(obj, c.scheme)
if err != nil {
return nil, err
}
_, isUnstructured := obj.(*unstructured.Unstructured)
_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
isUnstructured = isUnstructured || isUnstructuredList
// It's better to do creation work twice than to not let multiple
// people make requests at once
c.mu.RLock()
resourceByType := c.structuredResourceByType
if isUnstructured {
resourceByType = c.unstructuredResourceByType
}
r, known := resourceByType[gvk]
c.mu.RUnlock()
if known {
return r, nil
}
// Initialize a new Client
c.mu.Lock()
defer c.mu.Unlock()
r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured)
if err != nil {
return nil, err
}
resourceByType[gvk] = r
return r, err
}
// getObjMeta returns objMeta containing both type and object metadata and state.
func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) {
r, err := c.getResource(obj)
if err != nil {
return nil, err
}
m, err := meta.Accessor(obj)
if err != nil {
return nil, err
}
return &objMeta{resourceMeta: r, Object: m}, err
}
// resourceMeta caches state for a Kubernetes type.
type resourceMeta struct {
// client is the rest client used to talk to the apiserver
rest.Interface
// gvk is the GroupVersionKind of the resourceMeta
gvk schema.GroupVersionKind
// mapping is the rest mapping
mapping *meta.RESTMapping
}
// isNamespaced returns true if the type is namespaced.
func (r *resourceMeta) isNamespaced() bool {
return r.mapping.Scope.Name() != meta.RESTScopeNameRoot
}
// resource returns the resource name of the type.
func (r *resourceMeta) resource() string {
return r.mapping.Resource.Resource
}
// objMeta stores type and object information about a Kubernetes type.
type objMeta struct {
// resourceMeta contains type information for the object
*resourceMeta
// Object contains meta data for the object instance
metav1.Object
}
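
getResource above takes a read lock for the common case and only acquires the write lock to build a client for a GVK it has not seen, accepting that two goroutines racing on a new GVK may build the same client twice. A stripped-down sketch of that locking pattern (illustrative only; the key and value types are placeholders):

import "sync"

// gvkCache mirrors the fast-path/slow-path locking used by clientCache.getResource.
type gvkCache struct {
	mu      sync.RWMutex
	entries map[string]string
}

func (c *gvkCache) get(key string, build func() string) string {
	// Fast path: shared lock so concurrent lookups never block each other.
	c.mu.RLock()
	v, known := c.entries[key]
	c.mu.RUnlock()
	if known {
		return v
	}
	// Slow path: a concurrent caller may build the same entry again; that
	// occasional duplicate work is accepted instead of serializing all lookups.
	c.mu.Lock()
	defer c.mu.Unlock()
	v = build()
	c.entries[key] = v
	return v
}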

View File

@@ -0,0 +1,40 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"errors"
"net/url"
"k8s.io/apimachinery/pkg/conversion/queryparams"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var _ runtime.ParameterCodec = noConversionParamCodec{}
// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings.
// It's useful in scenarios with the unstructured client and arbitrary resources.
type noConversionParamCodec struct{}
func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) {
return queryparams.Convert(obj)
}
func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error {
return errors.New("DecodeParameters not implemented on noConversionParamCodec")
}

View File

@@ -0,0 +1,157 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"flag"
"fmt"
"os"
"os/user"
"path/filepath"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
var (
kubeconfig string
log = logf.RuntimeLog.WithName("client").WithName("config")
)
func init() {
// TODO: Fix this to allow double vendoring this library but still register flags on behalf of users
flag.StringVar(&kubeconfig, "kubeconfig", "",
"Paths to a kubeconfig. Only required if out-of-cluster.")
}
// GetConfig creates a *rest.Config for talking to a Kubernetes API server.
// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
// in cluster and use the cluster provided kubeconfig.
//
// It also applies saner defaults for QPS and burst based on the Kubernetes
// controller manager defaults (20 QPS, 30 burst)
//
// Config precedence:
//
// * --kubeconfig flag pointing at a file
//
// * KUBECONFIG environment variable pointing at a file
//
// * In-cluster config if running in cluster
//
// * $HOME/.kube/config if it exists.
func GetConfig() (*rest.Config, error) {
return GetConfigWithContext("")
}
// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context.
// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
// in cluster and use the cluster provided kubeconfig.
//
// It also applies saner defaults for QPS and burst based on the Kubernetes
// controller manager defaults (20 QPS, 30 burst)
//
// Config precedence:
//
// * --kubeconfig flag pointing at a file
//
// * KUBECONFIG environment variable pointing at a file
//
// * In-cluster config if running in cluster
//
// * $HOME/.kube/config if it exists.
func GetConfigWithContext(context string) (*rest.Config, error) {
cfg, err := loadConfig(context)
if err != nil {
return nil, err
}
if cfg.QPS == 0.0 {
cfg.QPS = 20.0
cfg.Burst = 30.0
}
return cfg, nil
}
// loadInClusterConfig is a function used to load the in-cluster
// Kubernetes client config. This variable makes it possible to
// test the precedence of loading the config.
var loadInClusterConfig = rest.InClusterConfig
// loadConfig loads a REST Config as per the rules specified in GetConfig.
func loadConfig(context string) (*rest.Config, error) {
// If a flag is specified with the config location, use that
if len(kubeconfig) > 0 {
return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context)
}
// If the recommended kubeconfig env variable is not specified,
// try the in-cluster config.
kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
if len(kubeconfigPath) == 0 {
if c, err := loadInClusterConfig(); err == nil {
return c, nil
}
}
// If the recommended kubeconfig env variable is set, or there
// is no in-cluster config, try the default recommended locations.
//
// NOTE: For default config file locations, upstream only checks
// $HOME for the user's home directory, but we can also try
// os/user.HomeDir when $HOME is unset.
//
// TODO(jlanford): could this be done upstream?
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
if _, ok := os.LookupEnv("HOME"); !ok {
u, err := user.Current()
if err != nil {
return nil, fmt.Errorf("could not get current user: %w", err)
}
loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName))
}
return loadConfigWithContext("", loadingRules, context)
}
func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) {
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
loader,
&clientcmd.ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: apiServerURL,
},
CurrentContext: context,
}).ClientConfig()
}
// GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver.
// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
// in cluster and use the cluster provided kubeconfig.
//
// Will log an error and exit if there is an error creating the rest.Config.
func GetConfigOrDie() *rest.Config {
config, err := GetConfig()
if err != nil {
log.Error(err, "unable to get kubeconfig")
os.Exit(1)
}
return config
}
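
A typical caller lets this package resolve the kubeconfig precedence and then hands the resulting rest.Config to a client (or a manager). A short hedged sketch, not part of the vendored code:

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// newDirectClient resolves the kubeconfig in the documented order (--kubeconfig
// flag, $KUBECONFIG, in-cluster config, $HOME/.kube/config) and builds a client
// that talks directly to the API server.
func newDirectClient() (client.Client, error) {
	cfg, err := config.GetConfig()
	if err != nil {
		return nil, err
	}
	return client.New(cfg, client.Options{})
}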

View File

@@ -0,0 +1,18 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config contains libraries for initializing REST configs for talking to the Kubernetes API
package config

View File

@@ -0,0 +1,50 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package client contains functionality for interacting with Kubernetes API
// servers.
//
// # Clients
//
// Clients are split into two interfaces -- Readers and Writers. Readers
// get and list, while writers create, update, and delete.
//
// The New function can be used to create a new client that talks directly
// to the API server.
//
// It is a common pattern in Kubernetes to read from a cache and write to the API
// server. This pattern is covered by the DelegatingClient type, which can
// be used to have a client whose Reader is different from the Writer.
//
// # Options
//
// Many client operations in Kubernetes support options. These options are
// represented as variadic arguments at the end of a given method call.
// For instance, to use a label selector on list, you can call
//
// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"})
//
// # Indexing
//
// Indexes may be added to caches using a FieldIndexer. This allows you to easily
// and efficiently look up objects with certain properties. You can then make
// use of the index by specifying a field selector on calls to List on the Reader
// corresponding to the given Cache.
//
// For instance, a Secret controller might have an index on the
// `.spec.volumes.secret.secretName` field in Pod objects, so that it could
// easily look up all pods that reference a given secret.
package client
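
The Indexing section above is easiest to see end-to-end: register an extractor with the manager's FieldIndexer once, then filter List calls with a matching field selector. A hedged sketch (assumes a manager from sigs.k8s.io/controller-runtime/pkg/manager; the field key and secret name are illustrative):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// listPodsUsingSecret indexes Pods by the secrets they mount and then lists
// all Pods referencing a given secret through the cached Reader.
func listPodsUsingSecret(ctx context.Context, mgr manager.Manager, secretName string) (*corev1.PodList, error) {
	err := mgr.GetFieldIndexer().IndexField(ctx, &corev1.Pod{}, "spec.volumes.secret.secretName",
		func(o client.Object) []string {
			var keys []string
			for _, v := range o.(*corev1.Pod).Spec.Volumes {
				if v.Secret != nil {
					keys = append(keys, v.Secret.SecretName)
				}
			}
			return keys
		})
	if err != nil {
		return nil, err
	}

	pods := &corev1.PodList{}
	err = mgr.GetClient().List(ctx, pods,
		client.MatchingFields{"spec.volumes.secret.secretName": secretName})
	return pods, err
}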

View File

@@ -0,0 +1,106 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
)
// NewDryRunClient wraps an existing client and enforces DryRun mode
// on all mutating api calls.
func NewDryRunClient(c Client) Client {
return &dryRunClient{client: c}
}
var _ Client = &dryRunClient{}
// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode.
type dryRunClient struct {
client Client
}
// Scheme returns the scheme this client is using.
func (c *dryRunClient) Scheme() *runtime.Scheme {
return c.client.Scheme()
}
// RESTMapper returns the rest mapper this client is using.
func (c *dryRunClient) RESTMapper() meta.RESTMapper {
return c.client.RESTMapper()
}
// Create implements client.Client.
func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
return c.client.Create(ctx, obj, append(opts, DryRunAll)...)
}
// Update implements client.Client.
func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
return c.client.Update(ctx, obj, append(opts, DryRunAll)...)
}
// Delete implements client.Client.
func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
return c.client.Delete(ctx, obj, append(opts, DryRunAll)...)
}
// DeleteAllOf implements client.Client.
func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...)
}
// Patch implements client.Client.
func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
}
// Get implements client.Client.
func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
return c.client.Get(ctx, key, obj, opts...)
}
// List implements client.Client.
func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
return c.client.List(ctx, obj, opts...)
}
// Status implements client.StatusClient.
func (c *dryRunClient) Status() StatusWriter {
return &dryRunStatusWriter{client: c.client.Status()}
}
// ensure dryRunStatusWriter implements client.StatusWriter.
var _ StatusWriter = &dryRunStatusWriter{}
// dryRunStatusWriter is a client.StatusWriter that writes to the status subresource with dryRun mode
// enforced.
type dryRunStatusWriter struct {
client StatusWriter
}
// Update implements client.StatusWriter.
func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
return sw.client.Update(ctx, obj, append(opts, DryRunAll)...)
}
// Patch implements client.StatusWriter.
func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
}
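
NewDryRunClient is useful for exercising mutation paths without persisting anything: reads pass through untouched, while every write carries DryRunAll so the API server runs admission and validation but stores nothing. An illustrative sketch (not part of the vendored code):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// validateCreate submits a server-side dry-run Create; nothing is persisted.
func validateCreate(ctx context.Context, c client.Client) error {
	dryRun := client.NewDryRunClient(c)
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	return dryRun.Create(ctx, cm)
}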

View File

@@ -0,0 +1,787 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
"encoding/json"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/testing"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
)
type versionedTracker struct {
testing.ObjectTracker
scheme *runtime.Scheme
}
type fakeClient struct {
tracker versionedTracker
scheme *runtime.Scheme
restMapper meta.RESTMapper
schemeWriteLock sync.Mutex
}
var _ client.WithWatch = &fakeClient{}
const (
maxNameLength = 63
randomLength = 5
maxGeneratedNameLength = maxNameLength - randomLength
)
// NewFakeClient creates a new fake client for testing.
// You can choose to initialize it with a slice of runtime.Object.
//
// Deprecated: Please use NewClientBuilder instead.
func NewFakeClient(initObjs ...runtime.Object) client.WithWatch {
return NewClientBuilder().WithRuntimeObjects(initObjs...).Build()
}
// NewFakeClientWithScheme creates a new fake client with the given scheme
// for testing.
// You can choose to initialize it with a slice of runtime.Object.
//
// Deprecated: Please use NewClientBuilder instead.
func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.WithWatch {
return NewClientBuilder().WithScheme(clientScheme).WithRuntimeObjects(initObjs...).Build()
}
// NewClientBuilder returns a new builder to create a fake client.
func NewClientBuilder() *ClientBuilder {
return &ClientBuilder{}
}
// ClientBuilder builds a fake client.
type ClientBuilder struct {
scheme *runtime.Scheme
restMapper meta.RESTMapper
initObject []client.Object
initLists []client.ObjectList
initRuntimeObjects []runtime.Object
objectTracker testing.ObjectTracker
}
// WithScheme sets this builder's internal scheme.
// If not set, defaults to client-go's global scheme.Scheme.
func (f *ClientBuilder) WithScheme(scheme *runtime.Scheme) *ClientBuilder {
f.scheme = scheme
return f
}
// WithRESTMapper sets this builder's restMapper.
// The restMapper is directly set as mapper in the Client. This can be used for example
// with a meta.DefaultRESTMapper to provide a static rest mapping.
// If not set, defaults to an empty meta.DefaultRESTMapper.
func (f *ClientBuilder) WithRESTMapper(restMapper meta.RESTMapper) *ClientBuilder {
f.restMapper = restMapper
return f
}
// WithObjects can be optionally used to initialize this fake client with client.Object(s).
func (f *ClientBuilder) WithObjects(initObjs ...client.Object) *ClientBuilder {
f.initObject = append(f.initObject, initObjs...)
return f
}
// WithLists can be optionally used to initialize this fake client with client.ObjectList(s).
func (f *ClientBuilder) WithLists(initLists ...client.ObjectList) *ClientBuilder {
f.initLists = append(f.initLists, initLists...)
return f
}
// WithRuntimeObjects can be optionally used to initialize this fake client with runtime.Object(s).
func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *ClientBuilder {
f.initRuntimeObjects = append(f.initRuntimeObjects, initRuntimeObjs...)
return f
}
// WithObjectTracker can be optionally used to initialize this fake client with testing.ObjectTracker.
func (f *ClientBuilder) WithObjectTracker(ot testing.ObjectTracker) *ClientBuilder {
f.objectTracker = ot
return f
}
// Build builds and returns a new fake client.
func (f *ClientBuilder) Build() client.WithWatch {
if f.scheme == nil {
f.scheme = scheme.Scheme
}
if f.restMapper == nil {
f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{})
}
var tracker versionedTracker
if f.objectTracker == nil {
tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme}
} else {
tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme}
}
for _, obj := range f.initObject {
if err := tracker.Add(obj); err != nil {
panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err))
}
}
for _, obj := range f.initLists {
if err := tracker.Add(obj); err != nil {
panic(fmt.Errorf("failed to add list %v to fake client: %w", obj, err))
}
}
for _, obj := range f.initRuntimeObjects {
if err := tracker.Add(obj); err != nil {
panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err))
}
}
return &fakeClient{
tracker: tracker,
scheme: f.scheme,
restMapper: f.restMapper,
}
}
const trackerAddResourceVersion = "999"
func (t versionedTracker) Add(obj runtime.Object) error {
var objects []runtime.Object
if meta.IsListType(obj) {
var err error
objects, err = meta.ExtractList(obj)
if err != nil {
return err
}
} else {
objects = []runtime.Object{obj}
}
for _, obj := range objects {
accessor, err := meta.Accessor(obj)
if err != nil {
return fmt.Errorf("failed to get accessor for object: %w", err)
}
if accessor.GetResourceVersion() == "" {
// We use a "magic" value of 999 here because this field
// is parsed as uint and 0 is already used in Update.
// As we can't go lower, go very high instead so this can
// be recognized.
accessor.SetResourceVersion(trackerAddResourceVersion)
}
obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
if err != nil {
return err
}
if err := t.ObjectTracker.Add(obj); err != nil {
return err
}
}
return nil
}
func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
accessor, err := meta.Accessor(obj)
if err != nil {
return fmt.Errorf("failed to get accessor for object: %w", err)
}
if accessor.GetName() == "" {
return apierrors.NewInvalid(
obj.GetObjectKind().GroupVersionKind().GroupKind(),
accessor.GetName(),
field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
}
if accessor.GetResourceVersion() != "" {
return apierrors.NewBadRequest("resourceVersion can not be set for Create requests")
}
accessor.SetResourceVersion("1")
obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
if err != nil {
return err
}
if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil {
accessor.SetResourceVersion("")
return err
}
return nil
}
// convertFromUnstructuredIfNecessary will convert *unstructured.Unstructured objects for a GVK that is recognized
// by the scheme into whatever the scheme produces with New() for said GVK.
// This is required because the tracker unconditionally saves on manipulations, but its List() implementation
// tries to assign whatever it finds into a ListType it gets from scheme.New(). Thus we have to ensure
// we save as the very same type, otherwise subsequent List requests will fail.
func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) {
u, isUnstructured := o.(*unstructured.Unstructured)
if !isUnstructured || !s.Recognizes(u.GroupVersionKind()) {
return o, nil
}
typed, err := s.New(u.GroupVersionKind())
if err != nil {
return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", u.GroupVersionKind().String(), err)
}
unstructuredSerialized, err := json.Marshal(u)
if err != nil {
return nil, fmt.Errorf("failed to serialize %T: %w", unstructuredSerialized, err)
}
if err := json.Unmarshal(unstructuredSerialized, typed); err != nil {
return nil, fmt.Errorf("failed to unmarshal the content of %T into %T: %w", u, typed, err)
}
return typed, nil
}
func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
accessor, err := meta.Accessor(obj)
if err != nil {
return fmt.Errorf("failed to get accessor for object: %w", err)
}
if accessor.GetName() == "" {
return apierrors.NewInvalid(
obj.GetObjectKind().GroupVersionKind().GroupKind(),
accessor.GetName(),
field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
}
gvk := obj.GetObjectKind().GroupVersionKind()
if gvk.Empty() {
gvk, err = apiutil.GVKForObject(obj, t.scheme)
if err != nil {
return err
}
}
oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName())
if err != nil {
// If the resource is not found and the resource allows create on update, issue a
// create instead.
if apierrors.IsNotFound(err) && allowsCreateOnUpdate(gvk) {
return t.Create(gvr, obj, ns)
}
return err
}
oldAccessor, err := meta.Accessor(oldObject)
if err != nil {
return err
}
// If the new object does not have the resource version set and it allows unconditional update,
// default it to the resource version of the existing resource
if accessor.GetResourceVersion() == "" && allowsUnconditionalUpdate(gvk) {
accessor.SetResourceVersion(oldAccessor.GetResourceVersion())
}
if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() {
return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified"))
}
if oldAccessor.GetResourceVersion() == "" {
oldAccessor.SetResourceVersion("0")
}
intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64)
if err != nil {
return fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err)
}
intResourceVersion++
accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10))
if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 {
return t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName())
}
obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
if err != nil {
return err
}
return t.ObjectTracker.Update(gvr, obj, ns)
}
func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
gvr, err := getGVRFromObject(obj, c.scheme)
if err != nil {
return err
}
o, err := c.tracker.Get(gvr, key.Namespace, key.Name)
if err != nil {
return err
}
gvk, err := apiutil.GVKForObject(obj, c.scheme)
if err != nil {
return err
}
ta, err := meta.TypeAccessor(o)
if err != nil {
return err
}
ta.SetKind(gvk.Kind)
ta.SetAPIVersion(gvk.GroupVersion().String())
j, err := json.Marshal(o)
if err != nil {
return err
}
decoder := scheme.Codecs.UniversalDecoder()
zero(obj)
_, _, err = decoder.Decode(j, nil, obj)
return err
}
func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) {
gvk, err := apiutil.GVKForObject(list, c.scheme)
if err != nil {
return nil, err
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
return c.tracker.Watch(gvr, listOpts.Namespace)
}
func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error {
gvk, err := apiutil.GVKForObject(obj, c.scheme)
if err != nil {
return err
}
originalKind := gvk.Kind
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
if _, isUnstructuredList := obj.(*unstructured.UnstructuredList); isUnstructuredList && !c.scheme.Recognizes(gvk) {
// We need to register the ListKind with UnstructuredList:
// https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51
c.schemeWriteLock.Lock()
c.scheme.AddKnownTypeWithName(gvk.GroupVersion().WithKind(gvk.Kind+"List"), &unstructured.UnstructuredList{})
c.schemeWriteLock.Unlock()
}
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
o, err := c.tracker.List(gvr, gvk, listOpts.Namespace)
if err != nil {
return err
}
ta, err := meta.TypeAccessor(o)
if err != nil {
return err
}
ta.SetKind(originalKind)
ta.SetAPIVersion(gvk.GroupVersion().String())
j, err := json.Marshal(o)
if err != nil {
return err
}
decoder := scheme.Codecs.UniversalDecoder()
zero(obj)
_, _, err = decoder.Decode(j, nil, obj)
if err != nil {
return err
}
if listOpts.LabelSelector != nil {
objs, err := meta.ExtractList(obj)
if err != nil {
return err
}
filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector)
if err != nil {
return err
}
err = meta.SetList(obj, filteredObjs)
if err != nil {
return err
}
}
return nil
}
func (c *fakeClient) Scheme() *runtime.Scheme {
return c.scheme
}
func (c *fakeClient) RESTMapper() meta.RESTMapper {
return c.restMapper
}
func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
createOptions := &client.CreateOptions{}
createOptions.ApplyOptions(opts)
for _, dryRunOpt := range createOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
}
}
gvr, err := getGVRFromObject(obj, c.scheme)
if err != nil {
return err
}
accessor, err := meta.Accessor(obj)
if err != nil {
return err
}
if accessor.GetName() == "" && accessor.GetGenerateName() != "" {
base := accessor.GetGenerateName()
if len(base) > maxGeneratedNameLength {
base = base[:maxGeneratedNameLength]
}
accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength)))
}
return c.tracker.Create(gvr, obj, accessor.GetNamespace())
}
func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
gvr, err := getGVRFromObject(obj, c.scheme)
if err != nil {
return err
}
accessor, err := meta.Accessor(obj)
if err != nil {
return err
}
delOptions := client.DeleteOptions{}
delOptions.ApplyOptions(opts)
for _, dryRunOpt := range delOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
}
}
// Check the ResourceVersion if that Precondition was specified.
if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil {
name := accessor.GetName()
dbObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), name)
if err != nil {
return err
}
oldAccessor, err := meta.Accessor(dbObj)
if err != nil {
return err
}
actualRV := oldAccessor.GetResourceVersion()
expectRV := *delOptions.Preconditions.ResourceVersion
if actualRV != expectRV {
msg := fmt.Sprintf(
"the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). "+
"The object might have been modified",
expectRV, actualRV)
return apierrors.NewConflict(gvr.GroupResource(), name, errors.New(msg))
}
}
return c.deleteObject(gvr, accessor)
}
func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
gvk, err := apiutil.GVKForObject(obj, c.scheme)
if err != nil {
return err
}
dcOptions := client.DeleteAllOfOptions{}
dcOptions.ApplyOptions(opts)
for _, dryRunOpt := range dcOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
}
}
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace)
if err != nil {
return err
}
objs, err := meta.ExtractList(o)
if err != nil {
return err
}
filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector)
if err != nil {
return err
}
for _, o := range filteredObjs {
accessor, err := meta.Accessor(o)
if err != nil {
return err
}
err = c.deleteObject(gvr, accessor)
if err != nil {
return err
}
}
return nil
}
func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
updateOptions := &client.UpdateOptions{}
updateOptions.ApplyOptions(opts)
for _, dryRunOpt := range updateOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
}
}
gvr, err := getGVRFromObject(obj, c.scheme)
if err != nil {
return err
}
accessor, err := meta.Accessor(obj)
if err != nil {
return err
}
return c.tracker.Update(gvr, obj, accessor.GetNamespace())
}
func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
patchOptions := &client.PatchOptions{}
patchOptions.ApplyOptions(opts)
for _, dryRunOpt := range patchOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
}
}
gvr, err := getGVRFromObject(obj, c.scheme)
if err != nil {
return err
}
accessor, err := meta.Accessor(obj)
if err != nil {
return err
}
data, err := patch.Data(obj)
if err != nil {
return err
}
reaction := testing.ObjectReaction(c.tracker)
handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data))
if err != nil {
return err
}
if !handled {
panic("tracker could not handle patch method")
}
gvk, err := apiutil.GVKForObject(obj, c.scheme)
if err != nil {
return err
}
ta, err := meta.TypeAccessor(o)
if err != nil {
return err
}
ta.SetKind(gvk.Kind)
ta.SetAPIVersion(gvk.GroupVersion().String())
j, err := json.Marshal(o)
if err != nil {
return err
}
decoder := scheme.Codecs.UniversalDecoder()
zero(obj)
_, _, err = decoder.Decode(j, nil, obj)
return err
}
func (c *fakeClient) Status() client.StatusWriter {
return &fakeStatusWriter{client: c}
}
func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error {
old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
if err == nil {
oldAccessor, err := meta.Accessor(old)
if err == nil {
if len(oldAccessor.GetFinalizers()) > 0 {
now := metav1.Now()
oldAccessor.SetDeletionTimestamp(&now)
return c.tracker.Update(gvr, old, accessor.GetNamespace())
}
}
}
//TODO: implement propagation
return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName())
}
func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) {
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return schema.GroupVersionResource{}, err
}
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
return gvr, nil
}
type fakeStatusWriter struct {
client *fakeClient
}
func (sw *fakeStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
// TODO(droot): This results in full update of the obj (spec + status). Need
// a way to update status field only.
return sw.client.Update(ctx, obj, opts...)
}
func (sw *fakeStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
// TODO(droot): This results in full update of the obj (spec + status). Need
// a way to update status field only.
return sw.client.Patch(ctx, obj, patch, opts...)
}
func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool {
switch gvk.Group {
case "apps":
switch gvk.Kind {
case "ControllerRevision", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet":
return true
}
case "autoscaling":
switch gvk.Kind {
case "HorizontalPodAutoscaler":
return true
}
case "batch":
switch gvk.Kind {
case "CronJob", "Job":
return true
}
case "certificates":
switch gvk.Kind {
case "Certificates":
return true
}
case "flowcontrol":
switch gvk.Kind {
case "FlowSchema", "PriorityLevelConfiguration":
return true
}
case "networking":
switch gvk.Kind {
case "Ingress", "IngressClass", "NetworkPolicy":
return true
}
case "policy":
switch gvk.Kind {
case "PodSecurityPolicy":
return true
}
case "rbac":
switch gvk.Kind {
case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding":
return true
}
case "scheduling":
switch gvk.Kind {
case "PriorityClass":
return true
}
case "settings":
switch gvk.Kind {
case "PodPreset":
return true
}
case "storage":
switch gvk.Kind {
case "StorageClass":
return true
}
case "":
switch gvk.Kind {
case "ConfigMap", "Endpoint", "Event", "LimitRange", "Namespace", "Node",
"PersistentVolume", "PersistentVolumeClaim", "Pod", "PodTemplate",
"ReplicationController", "ResourceQuota", "Secret", "Service",
"ServiceAccount", "EndpointSlice":
return true
}
}
return false
}
func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool {
switch gvk.Group {
case "coordination":
switch gvk.Kind {
case "Lease":
return true
}
case "node":
switch gvk.Kind {
case "RuntimeClass":
return true
}
case "rbac":
switch gvk.Kind {
case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding":
return true
}
case "":
switch gvk.Kind {
case "Endpoint", "Event", "LimitRange", "Service":
return true
}
}
return false
}
// zero zeros the value of a pointer.
func zero(x interface{}) {
if x == nil {
return
}
res := reflect.ValueOf(x).Elem()
res.Set(reflect.Zero(res.Type()))
}

View File

@@ -0,0 +1,38 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package fake provides a fake client for testing.
A fake client is backed by its simple object store indexed by GroupVersionResource.
You can create a fake client with optional objects.
client := NewFakeClientWithScheme(scheme, initObjs...) // initObjs is a slice of runtime.Object
You can invoke the methods defined in the Client interface.
When in doubt, it's almost always better not to use this package and instead use
envtest.Environment with a real client and API server.
WARNING: ⚠️ Current Limitations / Known Issues with the fake Client ⚠️
- This client does not have a way to inject specific errors to test handled vs. unhandled errors.
- There is some support for sub resources which can cause issues with tests if you're trying to update
e.g. metadata and status in the same reconcile.
- No OpenAPI validation is performed when creating or updating objects.
- ObjectMeta's `Generation` and `ResourceVersion` don't behave properly; Patch or Update
operations that rely on these fields will fail or give false positives.
*/
package fake
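
The non-deprecated way to get the client described above is the builder: seed it with objects, then drive the code under test through the ordinary Client interface. A hedged sketch of a test (object names are illustrative):

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestGetSeededConfigMap(t *testing.T) {
	seed := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "seed", Namespace: "default"}}

	c := fake.NewClientBuilder().
		WithScheme(scheme.Scheme). // client-go's global scheme already knows core/v1
		WithObjects(seed).
		Build()

	got := &corev1.ConfigMap{}
	if err := c.Get(context.Background(), client.ObjectKey{Namespace: "default", Name: "seed"}, got); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}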

View File

@@ -0,0 +1,155 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
// ObjectKey identifies a Kubernetes Object.
type ObjectKey = types.NamespacedName
// ObjectKeyFromObject returns the ObjectKey given an Object.
func ObjectKeyFromObject(obj Object) ObjectKey {
return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}
}
// Patch is a patch that can be applied to a Kubernetes object.
type Patch interface {
// Type is the PatchType of the patch.
Type() types.PatchType
// Data is the raw data representing the patch.
Data(obj Object) ([]byte, error)
}
// TODO(directxman12): is there a sane way to deal with get/delete options?
// Reader knows how to read and list Kubernetes objects.
type Reader interface {
// Get retrieves an obj for the given object key from the Kubernetes Cluster.
// obj must be a struct pointer so that obj can be updated with the response
// returned by the Server.
Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error
// List retrieves list of objects for a given namespace and list options. On a
// successful call, Items field in the list will be populated with the
// result returned from the server.
List(ctx context.Context, list ObjectList, opts ...ListOption) error
}
// Writer knows how to create, delete, and update Kubernetes objects.
type Writer interface {
// Create saves the object obj in the Kubernetes cluster.
Create(ctx context.Context, obj Object, opts ...CreateOption) error
// Delete deletes the given obj from Kubernetes cluster.
Delete(ctx context.Context, obj Object, opts ...DeleteOption) error
// Update updates the given obj in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
Update(ctx context.Context, obj Object, opts ...UpdateOption) error
// Patch patches the given obj in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
// DeleteAllOf deletes all objects of the given type matching the given options.
DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error
}
// StatusClient knows how to create a client which can update status subresource
// for kubernetes objects.
type StatusClient interface {
Status() StatusWriter
}
// StatusWriter knows how to update status subresource of a Kubernetes object.
type StatusWriter interface {
// Update updates the fields corresponding to the status subresource for the
// given obj. obj must be a struct pointer so that obj can be updated
// with the content returned by the Server.
Update(ctx context.Context, obj Object, opts ...UpdateOption) error
// Patch patches the given object's subresource. obj must be a struct
// pointer so that obj can be updated with the content returned by the
// Server.
Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
}
// Client knows how to perform CRUD operations on Kubernetes objects.
type Client interface {
Reader
Writer
StatusClient
// Scheme returns the scheme this client is using.
Scheme() *runtime.Scheme
// RESTMapper returns the rest mapper this client is using.
RESTMapper() meta.RESTMapper
}
// WithWatch supports Watch on top of the CRUD operations supported by
// the normal Client. Its intended use-case are CLI apps that need to wait for
// events.
type WithWatch interface {
Client
Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error)
}
// IndexerFunc knows how to take an object and turn it into a series
// of non-namespaced keys. Namespaced objects are automatically given
// namespaced and non-namespaced variants, so keys do not need to include namespace.
type IndexerFunc func(Object) []string
// FieldIndexer knows how to index over a particular "field" such that it
// can later be used by a field selector.
type FieldIndexer interface {
// IndexFields adds an index with the given field name on the given object type
// by using the given function to extract the value for that field. If you want
// compatibility with the Kubernetes API server, only return one key, and only use
// fields that the API server supports. Otherwise, you can return multiple keys,
// and "equality" in the field selector means that at least one key matches the value.
// The FieldIndexer will automatically take care of indexing over namespace
// and supporting efficient all-namespace queries.
IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error
}
// IgnoreNotFound returns nil on NotFound errors.
// All other values that are not NotFound errors or nil are returned unmodified.
func IgnoreNotFound(err error) error {
if apierrors.IsNotFound(err) {
return nil
}
return err
}
// IgnoreAlreadyExists returns nil on AlreadyExists errors.
// All other values that are not AlreadyExists errors or nil are returned unmodified.
func IgnoreAlreadyExists(err error) error {
if apierrors.IsAlreadyExists(err) {
return nil
}
return err
}
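
IgnoreNotFound is most often used at the top of a reconciler: a missing object usually means it was deleted and there is nothing left to do, so the NotFound error should not cause a requeue. A hedged sketch (the reconciler type is illustrative):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type podReconciler struct {
	client.Client
}

func (r *podReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	pod := &corev1.Pod{}
	if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
		// A NotFound error means the Pod was deleted; drop it without requeueing.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// ... reconcile the Pod ...
	return ctrl.Result{}, nil
}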

View File

@@ -0,0 +1,196 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/metadata"
)
// TODO(directxman12): we could rewrite this on top of the low-level REST
// client to avoid the extra shallow copy at the end, but I'm not sure it's
// worth it -- the metadata client deals with falling back to loading the whole
// object on older API servers, etc, and we'd have to reproduce that.
// metadataClient is a client that reads & writes metadata-only requests to/from the API server.
type metadataClient struct {
client metadata.Interface
restMapper meta.RESTMapper
}
func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) {
mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
if mapping.Scope.Name() == meta.RESTScopeNameRoot {
return mc.client.Resource(mapping.Resource), nil
}
return mc.client.Resource(mapping.Resource).Namespace(ns), nil
}
// Delete implements client.Client.
func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}
resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace)
if err != nil {
return err
}
deleteOpts := DeleteOptions{}
deleteOpts.ApplyOptions(opts)
return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions())
}
// DeleteAllOf implements client.Client.
func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}
deleteAllOfOpts := DeleteAllOfOptions{}
deleteAllOfOpts.ApplyOptions(opts)
resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace)
if err != nil {
return err
}
return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions())
}
// Patch implements client.Client.
func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}
gvk := metadata.GroupVersionKind()
resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
if err != nil {
return err
}
data, err := patch.Data(obj)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
patchOpts.ApplyOptions(opts)
res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions())
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}
// Get implements client.Client.
func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}
gvk := metadata.GroupVersionKind()
getOpts := GetOptions{}
getOpts.ApplyOptions(opts)
resInt, err := mc.getResourceInterface(gvk, key.Namespace)
if err != nil {
return err
}
res, err := resInt.Get(ctx, key.Name, *getOpts.AsGetOptions())
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}
// List implements client.Client.
func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadataList)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}
gvk := metadata.GroupVersionKind()
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace)
if err != nil {
return err
}
res, err := resInt.List(ctx, *listOpts.AsListOptions())
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}
func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}
gvk := metadata.GroupVersionKind()
resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
if err != nil {
return err
}
data, err := patch.Data(obj)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status")
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}
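
Callers reach this client through the main Client simply by passing PartialObjectMetadata objects; because there is no typed struct to infer the kind from, the GVK has to be set up front and is restored on the result. A minimal sketch (not part of the vendored code):

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listPodMetadata lists only the metadata of Pods in a namespace; the list's
// GVK must be set so the request can be routed and the kind restored afterwards.
func listPodMetadata(ctx context.Context, c client.Client) (*metav1.PartialObjectMetadataList, error) {
	pods := &metav1.PartialObjectMetadataList{}
	pods.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "PodList"})
	if err := c.List(ctx, pods, client.InNamespace("default")); err != nil {
		return nil, err
	}
	return pods, nil
}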

View File

@@ -0,0 +1,213 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
)
// NewNamespacedClient wraps an existing client enforcing the namespace value.
// All functions using this client will have the same namespace declared here.
func NewNamespacedClient(c Client, ns string) Client {
return &namespacedClient{
client: c,
namespace: ns,
}
}
var _ Client = &namespacedClient{}
// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value.
type namespacedClient struct {
namespace string
client Client
}
// Scheme returns the scheme this client is using.
func (n *namespacedClient) Scheme() *runtime.Scheme {
return n.client.Scheme()
}
// RESTMapper returns the rest mapper this client is using.
func (n *namespacedClient) RESTMapper() meta.RESTMapper {
return n.client.RESTMapper()
}
// Create implements client.Client.
func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Create(ctx, obj, opts...)
}
// Update implements client.Client.
func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Update(ctx, obj, opts...)
}
// Delete implements client.Client.
func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Delete(ctx, obj, opts...)
}
// DeleteAllOf implements client.Client.
func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
if isNamespaceScoped {
opts = append(opts, InNamespace(n.namespace))
}
return n.client.DeleteAllOf(ctx, obj, opts...)
}
// Patch implements client.Client.
func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Patch(ctx, obj, patch, opts...)
}
// Get implements client.Client.
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
if isNamespaceScoped {
if key.Namespace != "" && key.Namespace != n.namespace {
return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace)
}
key.Namespace = n.namespace
}
return n.client.Get(ctx, key, obj, opts...)
}
// List implements client.Client.
func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
if n.namespace != "" {
opts = append(opts, InNamespace(n.namespace))
}
return n.client.List(ctx, obj, opts...)
}
// Status implements client.StatusClient.
func (n *namespacedClient) Status() StatusWriter {
return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n}
}
// ensure namespacedClientStatusWriter implements client.StatusWriter.
var _ StatusWriter = &namespacedClientStatusWriter{}
type namespacedClientStatusWriter struct {
StatusClient StatusWriter
namespace string
namespacedclient Client
}
// Update implements client.StatusWriter.
func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != nsw.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(nsw.namespace)
}
return nsw.StatusClient.Update(ctx, obj, opts...)
}
// Patch implements client.StatusWriter.
func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != nsw.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(nsw.namespace)
}
return nsw.StatusClient.Patch(ctx, obj, patch, opts...)
}

View File

@@ -0,0 +1,77 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// Object is a Kubernetes object; it allows functions to work interchangeably
// with any resource that implements both Object interfaces.
//
// Semantically, these are objects which are both serializable (runtime.Object)
// and identifiable (metav1.Object) -- think any object which you could write
// as YAML or JSON, and then `kubectl create`.
//
// Code-wise, this means that any object which embeds both ObjectMeta (which
// provides metav1.Object) and TypeMeta (which provides half of runtime.Object)
// and has a `DeepCopyObject` implementation (the other half of runtime.Object)
// will implement this by default.
//
// For example, nearly all the built-in types are Objects, as well as all
// KubeBuilder-generated CRDs (unless you do something real funky to them).
//
// By and large, most things that implement runtime.Object also implement
// Object -- it's very rare to have *just* a runtime.Object implementation (the
// cases tend to be funky built-in types like Webhook payloads that don't have
// a `metadata` field).
//
// Notice that XYZList types are distinct: they implement ObjectList instead.
type Object interface {
metav1.Object
runtime.Object
}
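// As an illustrative, non-normative compile-time check: a typed object such as
// *corev1.Pod satisfies Object because it embeds both ObjectMeta and TypeMeta
// and has a generated DeepCopyObject (corev1 assumed to be "k8s.io/api/core/v1"):
//
//	var _ Object = &corev1.Pod{}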
// ObjectList is a Kubernetes object list; it allows functions to work
// interchangeably with any resource that implements both the runtime.Object
// and metav1.ListInterface interfaces.
//
// Semantically, this is any object which may be serialized (runtime.Object), and
// is a kubernetes list wrapper (has items, pagination fields, etc) -- think
// the wrapper used in a response from a `kubectl list --output yaml` call.
//
// Code-wise, this means that any object which embeds both ListMeta (which
// provides metav1.ListInterface) and TypeMeta (which provides half of
// runtime.Object) and has a `DeepCopyObject` implementation (the other half of
// runtime.Object) will implement this by default.
//
// For example, nearly all the built-in XYZList types are ObjectLists, as well
// as the XYZList types for all KubeBuilder-generated CRDs (unless you do
// something real funky to them).
//
// By and large, most things that are XYZList and implement runtime.Object also
// implement ObjectList -- it's very rare to have *just* a runtime.Object
// implementation (the cases tend to be funky built-in types like Webhook
// payloads that don't have a `metadata` field).
//
// This is similar to Object, which is almost always implemented by the items
// in the list themselves.
type ObjectList interface {
metav1.ListInterface
runtime.Object
}
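// Similarly, as an illustrative sketch, the corresponding list type satisfies
// ObjectList (again assuming corev1 is "k8s.io/api/core/v1"):
//
//	var _ ObjectList = &corev1.PodList{}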

View File

@@ -0,0 +1,742 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
)
// {{{ "Functional" Option Interfaces
// CreateOption is some configuration that modifies options for a create request.
type CreateOption interface {
// ApplyToCreate applies this configuration to the given create options.
ApplyToCreate(*CreateOptions)
}
// DeleteOption is some configuration that modifies options for a delete request.
type DeleteOption interface {
// ApplyToDelete applies this configuration to the given delete options.
ApplyToDelete(*DeleteOptions)
}
// GetOption is some configuration that modifies options for a get request.
type GetOption interface {
// ApplyToGet applies this configuration to the given get options.
ApplyToGet(*GetOptions)
}
// ListOption is some configuration that modifies options for a list request.
type ListOption interface {
// ApplyToList applies this configuration to the given list options.
ApplyToList(*ListOptions)
}
// UpdateOption is some configuration that modifies options for an update request.
type UpdateOption interface {
// ApplyToUpdate applies this configuration to the given update options.
ApplyToUpdate(*UpdateOptions)
}
// PatchOption is some configuration that modifies options for a patch request.
type PatchOption interface {
// ApplyToPatch applies this configuration to the given patch options.
ApplyToPatch(*PatchOptions)
}
// DeleteAllOfOption is some configuration that modifies options for a delete request.
type DeleteAllOfOption interface {
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
ApplyToDeleteAllOf(*DeleteAllOfOptions)
}
// }}}
// {{{ Multi-Type Options
// DryRunAll sets the "dry run" option to "all", executing all
// validation, etc without persisting the change to storage.
var DryRunAll = dryRunAll{}
type dryRunAll struct{}
// ApplyToCreate applies this configuration to the given create options.
func (dryRunAll) ApplyToCreate(opts *CreateOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
// ApplyToUpdate applies this configuration to the given update options.
func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
// ApplyToPatch applies this configuration to the given patch options.
func (dryRunAll) ApplyToPatch(opts *PatchOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
// ApplyToDelete applies this configuration to the given delete options.
func (dryRunAll) ApplyToDelete(opts *DeleteOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
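// Illustrative usage (a sketch; "c", "ctx" and "deployment" are assumed to
// exist in the caller):
//
//	// Run server-side validation without persisting the object.
//	err := c.Create(ctx, deployment, client.DryRunAll)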
// FieldOwner sets the field manager name for the given server-side apply patch.
type FieldOwner string
// ApplyToPatch applies this configuration to the given patch options.
func (f FieldOwner) ApplyToPatch(opts *PatchOptions) {
opts.FieldManager = string(f)
}
// ApplyToCreate applies this configuration to the given create options.
func (f FieldOwner) ApplyToCreate(opts *CreateOptions) {
opts.FieldManager = string(f)
}
// ApplyToUpdate applies this configuration to the given update options.
func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) {
opts.FieldManager = string(f)
}
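// Illustrative usage (a sketch; "c", "ctx" and "obj" are assumed to exist in
// the caller):
//
//	// Record "my-controller" as the field manager for this write.
//	err := c.Update(ctx, obj, client.FieldOwner("my-controller"))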
// }}}
// {{{ Create Options
// CreateOptions contains options for create requests. It's generally a subset
// of metav1.CreateOptions.
type CreateOptions struct {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
DryRun []string
// FieldManager is the name of the user or component submitting
// this request. It must be set with server-side apply.
FieldManager string
// Raw represents raw CreateOptions, as passed to the API server.
Raw *metav1.CreateOptions
}
// AsCreateOptions returns these options as a metav1.CreateOptions.
// This may mutate the Raw field.
func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions {
if o == nil {
return &metav1.CreateOptions{}
}
if o.Raw == nil {
o.Raw = &metav1.CreateOptions{}
}
o.Raw.DryRun = o.DryRun
o.Raw.FieldManager = o.FieldManager
return o.Raw
}
// ApplyOptions applies the given create options on these options,
// and then returns itself (for convenient chaining).
func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions {
for _, opt := range opts {
opt.ApplyToCreate(o)
}
return o
}
// ApplyToCreate implements CreateOption.
func (o *CreateOptions) ApplyToCreate(co *CreateOptions) {
if o.DryRun != nil {
co.DryRun = o.DryRun
}
if o.FieldManager != "" {
co.FieldManager = o.FieldManager
}
if o.Raw != nil {
co.Raw = o.Raw
}
}
var _ CreateOption = &CreateOptions{}
// }}}
// {{{ Delete Options
// DeleteOptions contains options for delete requests. It's generally a subset
// of metav1.DeleteOptions.
type DeleteOptions struct {
// GracePeriodSeconds is the duration in seconds before the object should be
// deleted. Value must be a non-negative integer. The value zero indicates
// delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
GracePeriodSeconds *int64
// Preconditions must be fulfilled before a deletion is carried out. If not
// possible, a 409 Conflict status will be returned.
Preconditions *metav1.Preconditions
// PropagationPolicy determines whether and how garbage collection will be
// performed. Either this field or OrphanDependents may be set, but not both.
// The default policy is decided by the existing finalizer set in the
// metadata.finalizers and the resource-specific default policy.
// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
// allow the garbage collector to delete the dependents in the background;
// 'Foreground' - a cascading policy that deletes all dependents in the
// foreground.
PropagationPolicy *metav1.DeletionPropagation
// Raw represents raw DeleteOptions, as passed to the API server.
Raw *metav1.DeleteOptions
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
DryRun []string
}
// AsDeleteOptions returns these options as a metav1.DeleteOptions.
// This may mutate the Raw field.
func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions {
if o == nil {
return &metav1.DeleteOptions{}
}
if o.Raw == nil {
o.Raw = &metav1.DeleteOptions{}
}
o.Raw.GracePeriodSeconds = o.GracePeriodSeconds
o.Raw.Preconditions = o.Preconditions
o.Raw.PropagationPolicy = o.PropagationPolicy
o.Raw.DryRun = o.DryRun
return o.Raw
}
// ApplyOptions applies the given delete options on these options,
// and then returns itself (for convenient chaining).
func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions {
for _, opt := range opts {
opt.ApplyToDelete(o)
}
return o
}
var _ DeleteOption = &DeleteOptions{}
// ApplyToDelete implements DeleteOption.
func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) {
if o.GracePeriodSeconds != nil {
do.GracePeriodSeconds = o.GracePeriodSeconds
}
if o.Preconditions != nil {
do.Preconditions = o.Preconditions
}
if o.PropagationPolicy != nil {
do.PropagationPolicy = o.PropagationPolicy
}
if o.Raw != nil {
do.Raw = o.Raw
}
if o.DryRun != nil {
do.DryRun = o.DryRun
}
}
// GracePeriodSeconds sets the grace period for the deletion
// to the given number of seconds.
type GracePeriodSeconds int64
// ApplyToDelete applies this configuration to the given delete options.
func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) {
secs := int64(s)
opts.GracePeriodSeconds = &secs
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
s.ApplyToDelete(&opts.DeleteOptions)
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
type Preconditions metav1.Preconditions
// ApplyToDelete applies this configuration to the given delete options.
func (p Preconditions) ApplyToDelete(opts *DeleteOptions) {
preconds := metav1.Preconditions(p)
opts.Preconditions = &preconds
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
p.ApplyToDelete(&opts.DeleteOptions)
}
// PropagationPolicy determines whether and how garbage collection will be
// performed. Either this field or OrphanDependents may be set, but not both.
// The default policy is decided by the existing finalizer set in the
// metadata.finalizers and the resource-specific default policy.
// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
// allow the garbage collector to delete the dependents in the background;
// 'Foreground' - a cascading policy that deletes all dependents in the
// foreground.
type PropagationPolicy metav1.DeletionPropagation
// ApplyToDelete applies the given delete options on these options.
// It will propagate to the dependents of the object to let the garbage collector handle it.
func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) {
policy := metav1.DeletionPropagation(p)
opts.PropagationPolicy = &policy
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
p.ApplyToDelete(&opts.DeleteOptions)
}
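// Illustrative usage combining the delete options above (a sketch; "c", "ctx"
// and "pod" are assumed to exist in the caller):
//
//	err := c.Delete(ctx, pod,
//		client.GracePeriodSeconds(30),
//		client.PropagationPolicy(metav1.DeletePropagationForeground),
//	)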
// }}}
// {{{ Get Options
// GetOptions contains options for get operation.
// Currently it only has a Raw field, which supports specifying a resourceVersion.
type GetOptions struct {
// Raw represents raw GetOptions, as passed to the API server. Note
// that these may not be respected by all implementations of the interface.
Raw *metav1.GetOptions
}
var _ GetOption = &GetOptions{}
// ApplyToGet implements GetOption for GetOptions.
func (o *GetOptions) ApplyToGet(lo *GetOptions) {
if o.Raw != nil {
lo.Raw = o.Raw
}
}
// AsGetOptions returns these options as a flattened metav1.GetOptions.
// This may mutate the Raw field.
func (o *GetOptions) AsGetOptions() *metav1.GetOptions {
if o == nil || o.Raw == nil {
return &metav1.GetOptions{}
}
return o.Raw
}
// ApplyOptions applies the given get options on these options,
// and then returns itself (for convenient chaining).
func (o *GetOptions) ApplyOptions(opts []GetOption) *GetOptions {
for _, opt := range opts {
opt.ApplyToGet(o)
}
return o
}
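// Illustrative usage (a sketch; "c", "ctx" and "key" are assumed to exist in
// the caller). Passing GetOptions with a Raw value lets callers set fields such
// as ResourceVersion:
//
//	var pod corev1.Pod
//	err := c.Get(ctx, key, &pod, &client.GetOptions{
//		Raw: &metav1.GetOptions{ResourceVersion: "0"},
//	})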
// }}}
// {{{ List Options
// ListOptions contains options for limiting or filtering results.
// It's generally a subset of metav1.ListOptions, with support for
// pre-parsed selectors (since generally, selectors will be executed
// against the cache).
type ListOptions struct {
// LabelSelector filters results by label. Use labels.Parse() to
// set from raw string form.
LabelSelector labels.Selector
// FieldSelector filters results by a particular field. In order
// to use this with cache-based implementations, restrict usage to
// a single field-value pair that's been added to the indexers.
FieldSelector fields.Selector
// Namespace represents the namespace to list for, or empty for
// non-namespaced objects, or to list across all namespaces.
Namespace string
// Limit specifies the maximum number of results to return from the server. The server may
// not support this field on all resource types, but if it does and more results remain it
// will set the continue field on the returned list object. This field is not supported if watch
// is true in the Raw ListOptions.
Limit int64
// Continue is a token returned by the server that lets a client retrieve chunks of results
// from the server by specifying limit. The server may reject requests for continuation tokens
// it does not recognize and will return a 410 error if the token can no longer be used because
// it has expired. This field is not supported if watch is true in the Raw ListOptions.
Continue string
// Raw represents raw ListOptions, as passed to the API server. Note
// that these may not be respected by all implementations of the interface,
// and the LabelSelector, FieldSelector, Limit and Continue fields are ignored.
Raw *metav1.ListOptions
}
var _ ListOption = &ListOptions{}
// ApplyToList implements ListOption for ListOptions.
func (o *ListOptions) ApplyToList(lo *ListOptions) {
if o.LabelSelector != nil {
lo.LabelSelector = o.LabelSelector
}
if o.FieldSelector != nil {
lo.FieldSelector = o.FieldSelector
}
if o.Namespace != "" {
lo.Namespace = o.Namespace
}
if o.Raw != nil {
lo.Raw = o.Raw
}
if o.Limit > 0 {
lo.Limit = o.Limit
}
if o.Continue != "" {
lo.Continue = o.Continue
}
}
// AsListOptions returns these options as a flattened metav1.ListOptions.
// This may mutate the Raw field.
func (o *ListOptions) AsListOptions() *metav1.ListOptions {
if o == nil {
return &metav1.ListOptions{}
}
if o.Raw == nil {
o.Raw = &metav1.ListOptions{}
}
if o.LabelSelector != nil {
o.Raw.LabelSelector = o.LabelSelector.String()
}
if o.FieldSelector != nil {
o.Raw.FieldSelector = o.FieldSelector.String()
}
if !o.Raw.Watch {
o.Raw.Limit = o.Limit
o.Raw.Continue = o.Continue
}
return o.Raw
}
// ApplyOptions applies the given list options on these options,
// and then returns itself (for convenient chaining).
func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions {
for _, opt := range opts {
opt.ApplyToList(o)
}
return o
}
// MatchingLabels filters the list/delete operation on the given set of labels.
type MatchingLabels map[string]string
// ApplyToList applies this configuration to the given list options.
func (m MatchingLabels) ApplyToList(opts *ListOptions) {
// TODO(directxman12): can we avoid reserializing this over and over?
sel := labels.SelectorFromValidatedSet(map[string]string(m))
opts.LabelSelector = sel
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
m.ApplyToList(&opts.ListOptions)
}
// HasLabels filters the list/delete operation checking if the set of labels exists
// without checking their values.
type HasLabels []string
// ApplyToList applies this configuration to the given list options.
func (m HasLabels) ApplyToList(opts *ListOptions) {
sel := labels.NewSelector()
for _, label := range m {
r, err := labels.NewRequirement(label, selection.Exists, nil)
if err == nil {
sel = sel.Add(*r)
}
}
opts.LabelSelector = sel
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
m.ApplyToList(&opts.ListOptions)
}
// MatchingLabelsSelector filters the list/delete operation on the given label
// selector (or index in the case of cached lists). A struct is used because
// labels.Selector is an interface, which cannot be aliased.
type MatchingLabelsSelector struct {
labels.Selector
}
// ApplyToList applies this configuration to the given list options.
func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) {
opts.LabelSelector = m
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
m.ApplyToList(&opts.ListOptions)
}
// MatchingFields filters the list/delete operation on the given field Set
// (or index in the case of cached lists).
type MatchingFields fields.Set
// ApplyToList applies this configuration to the given list options.
func (m MatchingFields) ApplyToList(opts *ListOptions) {
// TODO(directxman12): can we avoid re-serializing this?
sel := fields.Set(m).AsSelector()
opts.FieldSelector = sel
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
m.ApplyToList(&opts.ListOptions)
}
// MatchingFieldsSelector filters the list/delete operation on the given field
// selector (or index in the case of cached lists). A struct is used because
// fields.Selector is an interface, which cannot be aliased.
type MatchingFieldsSelector struct {
fields.Selector
}
// ApplyToList applies this configuration to the given list options.
func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) {
opts.FieldSelector = m
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
m.ApplyToList(&opts.ListOptions)
}
// InNamespace restricts the list/delete operation to the given namespace.
type InNamespace string
// ApplyToList applies this configuration to the given list options.
func (n InNamespace) ApplyToList(opts *ListOptions) {
opts.Namespace = string(n)
}
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
n.ApplyToList(&opts.ListOptions)
}
// Limit specifies the maximum number of results to return from the server.
// Limit does not implement DeleteAllOfOption interface because the server
// does not support setting it for deletecollection operations.
type Limit int64
// ApplyToList applies this configuration to the given list options.
func (l Limit) ApplyToList(opts *ListOptions) {
opts.Limit = int64(l)
}
// Continue sets a continuation token to retrieve chunks of results when using limit.
// Continue does not implement DeleteAllOfOption interface because the server
// does not support setting it for deletecollection operations.
type Continue string
// ApplyToList applies this configuration to the given list options.
func (c Continue) ApplyToList(opts *ListOptions) {
opts.Continue = string(c)
}
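// Illustrative usage combining several of the list options above (a sketch;
// "c" and "ctx" are assumed to exist in the caller, and corev1 is assumed to
// be "k8s.io/api/core/v1"):
//
//	var pods corev1.PodList
//	err := c.List(ctx, &pods,
//		client.InNamespace("default"),
//		client.MatchingLabels{"app": "web"},
//		client.Limit(50),
//	)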
// }}}
// {{{ Update Options
// UpdateOptions contains options for update requests. It's generally a subset
// of metav1.UpdateOptions.
type UpdateOptions struct {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
DryRun []string
// FieldManager is the name of the user or component submitting
// this request. It must be set with server-side apply.
FieldManager string
// Raw represents raw UpdateOptions, as passed to the API server.
Raw *metav1.UpdateOptions
}
// AsUpdateOptions returns these options as a metav1.UpdateOptions.
// This may mutate the Raw field.
func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions {
if o == nil {
return &metav1.UpdateOptions{}
}
if o.Raw == nil {
o.Raw = &metav1.UpdateOptions{}
}
o.Raw.DryRun = o.DryRun
o.Raw.FieldManager = o.FieldManager
return o.Raw
}
// ApplyOptions applies the given update options on these options,
// and then returns itself (for convenient chaining).
func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions {
for _, opt := range opts {
opt.ApplyToUpdate(o)
}
return o
}
var _ UpdateOption = &UpdateOptions{}
// ApplyToUpdate implements UpdateOption.
func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) {
if o.DryRun != nil {
uo.DryRun = o.DryRun
}
if o.FieldManager != "" {
uo.FieldManager = o.FieldManager
}
if o.Raw != nil {
uo.Raw = o.Raw
}
}
// }}}
// {{{ Patch Options
// PatchOptions contains options for patch requests.
type PatchOptions struct {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
DryRun []string
// Force is going to "force" Apply requests. It means user will
// re-acquire conflicting fields owned by other people. Force
// flag must be unset for non-apply patch requests.
// +optional
Force *bool
// FieldManager is the name of the user or component submitting
// this request. It must be set with server-side apply.
FieldManager string
// Raw represents raw PatchOptions, as passed to the API server.
Raw *metav1.PatchOptions
}
// ApplyOptions applies the given patch options on these options,
// and then returns itself (for convenient chaining).
func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions {
for _, opt := range opts {
opt.ApplyToPatch(o)
}
return o
}
// AsPatchOptions returns these options as a metav1.PatchOptions.
// This may mutate the Raw field.
func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions {
if o == nil {
return &metav1.PatchOptions{}
}
if o.Raw == nil {
o.Raw = &metav1.PatchOptions{}
}
o.Raw.DryRun = o.DryRun
o.Raw.Force = o.Force
o.Raw.FieldManager = o.FieldManager
return o.Raw
}
var _ PatchOption = &PatchOptions{}
// ApplyToPatch implements PatchOptions.
func (o *PatchOptions) ApplyToPatch(po *PatchOptions) {
if o.DryRun != nil {
po.DryRun = o.DryRun
}
if o.Force != nil {
po.Force = o.Force
}
if o.FieldManager != "" {
po.FieldManager = o.FieldManager
}
if o.Raw != nil {
po.Raw = o.Raw
}
}
// ForceOwnership indicates that in case of conflicts with server-side apply,
// the client should acquire ownership of the conflicting field. Most
// controllers should use this.
var ForceOwnership = forceOwnership{}
type forceOwnership struct{}
func (forceOwnership) ApplyToPatch(opts *PatchOptions) {
definitelyTrue := true
opts.Force = &definitelyTrue
}
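// Illustrative server-side apply usage (a sketch; "c", "ctx" and "obj" are
// assumed to exist in the caller, and "obj" must carry its GVK, name and
// namespace):
//
//	err := c.Patch(ctx, obj, client.Apply,
//		client.FieldOwner("my-operator"),
//		client.ForceOwnership,
//	)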
// }}}
// {{{ DeleteAllOf Options
// these are all just delete options and list options
// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests.
// It's just list and delete options smooshed together.
type DeleteAllOfOptions struct {
ListOptions
DeleteOptions
}
// ApplyOptions applies the given deleteallof options on these options,
// and then returns itself (for convenient chaining).
func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions {
for _, opt := range opts {
opt.ApplyToDeleteAllOf(o)
}
return o
}
var _ DeleteAllOfOption = &DeleteAllOfOptions{}
// ApplyToDeleteAllOf implements DeleteAllOfOption.
func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) {
o.ApplyToList(&do.ListOptions)
o.ApplyToDelete(&do.DeleteOptions)
}
// }}}

View File

@@ -0,0 +1,213 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch/v5"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/strategicpatch"
)
var (
// Apply uses server-side apply to patch the given object.
Apply Patch = applyPatch{}
// Merge uses the raw object as a merge patch, without modifications.
// Use MergeFrom if you wish to compute a diff instead.
Merge Patch = mergePatch{}
)
type patch struct {
patchType types.PatchType
data []byte
}
// Type implements Patch.
func (s *patch) Type() types.PatchType {
return s.patchType
}
// Data implements Patch.
func (s *patch) Data(obj Object) ([]byte, error) {
return s.data, nil
}
// RawPatch constructs a new Patch with the given PatchType and data.
func RawPatch(patchType types.PatchType, data []byte) Patch {
return &patch{patchType, data}
}
// MergeFromWithOptimisticLock can be used if clients want to make sure a patch
// is being applied to the latest resource version of an object.
//
// The behavior is similar to what an Update would do, without the need to send the
// whole object. Usually this method is useful if you might have multiple clients
// acting on the same object and the same API version, but with different versions of the Go structs.
//
// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C.
// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not.
type MergeFromWithOptimisticLock struct{}
// ApplyToMergeFrom applies this configuration to the given patch options.
func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) {
in.OptimisticLock = true
}
// MergeFromOption is some configuration that modifies options for a merge-from patch data.
type MergeFromOption interface {
// ApplyToMergeFrom applies this configuration to the given patch options.
ApplyToMergeFrom(*MergeFromOptions)
}
// MergeFromOptions contains options to generate a merge-from patch data.
type MergeFromOptions struct {
// OptimisticLock, when true, includes `metadata.resourceVersion` into the final
// patch data. If the `resourceVersion` field doesn't match what's stored,
// the operation results in a conflict and clients will need to try again.
OptimisticLock bool
}
type mergeFromPatch struct {
patchType types.PatchType
createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error)
from Object
opts MergeFromOptions
}
// Type implements Patch.
func (s *mergeFromPatch) Type() types.PatchType {
return s.patchType
}
// Data implements Patch.
func (s *mergeFromPatch) Data(obj Object) ([]byte, error) {
original := s.from
modified := obj
if s.opts.OptimisticLock {
version := original.GetResourceVersion()
if len(version) == 0 {
return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original)
}
original = original.DeepCopyObject().(Object)
original.SetResourceVersion("")
modified = modified.DeepCopyObject().(Object)
modified.SetResourceVersion(version)
}
originalJSON, err := json.Marshal(original)
if err != nil {
return nil, err
}
modifiedJSON, err := json.Marshal(modified)
if err != nil {
return nil, err
}
data, err := s.createPatch(originalJSON, modifiedJSON, obj)
if err != nil {
return nil, err
}
return data, nil
}
func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) {
return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
}
func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) {
return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct)
}
// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base.
// The difference between MergeFrom and StrategicMergeFrom lies in the handling of modified list fields.
// When using MergeFrom, existing lists will be completely replaced by new lists.
// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type,
// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`.
// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on
// the difference between merge-patch and strategic-merge-patch.
func MergeFrom(obj Object) Patch {
return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj}
}
// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base.
// See MergeFrom for more details.
func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch {
options := &MergeFromOptions{}
for _, opt := range opts {
opt.ApplyToMergeFrom(options)
}
return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options}
}
// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base.
// The difference between MergeFrom and StrategicMergeFrom lies in the handling of modified list fields.
// When using MergeFrom, existing lists will be completely replaced by new lists.
// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type,
// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`.
// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on
// the difference between merge-patch and strategic-merge-patch.
// Please note, that CRDs don't support strategic-merge-patch, see
// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch {
options := &MergeFromOptions{}
for _, opt := range opts {
opt.ApplyToMergeFrom(options)
}
return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options}
}
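// Illustrative merge-patch usage (a sketch; "c", "ctx" and "deployment" are
// assumed to exist in the caller, with "deployment" an *appsv1.Deployment):
//
//	base := deployment.DeepCopy()
//	replicas := int32(3)
//	deployment.Spec.Replicas = &replicas
//	// Only the diff between "base" and the modified "deployment" is sent.
//	err := c.Patch(ctx, deployment, client.MergeFrom(base))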
// mergePatch uses a raw merge strategy to patch the object.
type mergePatch struct{}
// Type implements Patch.
func (p mergePatch) Type() types.PatchType {
return types.MergePatchType
}
// Data implements Patch.
func (p mergePatch) Data(obj Object) ([]byte, error) {
// NB(directxman12): we might technically want to be using an actual encoder
// here (in case some more performant encoder is introduced) but this is
// correct and sufficient for our uses (it's what the JSON serializer in
// client-go does, more-or-less).
return json.Marshal(obj)
}
// applyPatch uses server-side apply to patch the object.
type applyPatch struct{}
// Type implements Patch.
func (p applyPatch) Type() types.PatchType {
return types.ApplyPatchType
}
// Data implements Patch.
func (p applyPatch) Data(obj Object) ([]byte, error) {
// NB(directxman12): we might technically want to be using an actual encoder
// here (in case some more performant encoder is introduced) but this is
// correct and sufficient for our uses (it's what the JSON serializer in
// client-go does, more-or-less).
return json.Marshal(obj)
}

View File

@@ -0,0 +1,141 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client.
type NewDelegatingClientInput struct {
CacheReader Reader
Client Client
UncachedObjects []Object
CacheUnstructured bool
}
// NewDelegatingClient creates a new delegating client.
//
// A delegating client forms a Client by composing separate reader, writer and
// status client interfaces. This way, you can have a Client that reads from a
// cache and writes to the API server.
func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) {
uncachedGVKs := map[schema.GroupVersionKind]struct{}{}
for _, obj := range in.UncachedObjects {
gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme())
if err != nil {
return nil, err
}
uncachedGVKs[gvk] = struct{}{}
}
return &delegatingClient{
scheme: in.Client.Scheme(),
mapper: in.Client.RESTMapper(),
Reader: &delegatingReader{
CacheReader: in.CacheReader,
ClientReader: in.Client,
scheme: in.Client.Scheme(),
uncachedGVKs: uncachedGVKs,
cacheUnstructured: in.CacheUnstructured,
},
Writer: in.Client,
StatusClient: in.Client,
}, nil
}
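// Illustrative wiring (a sketch; "cachedReader" and "apiClient" are assumed to
// be an informer-backed Reader and a direct API client respectively, and corev1
// is assumed to be "k8s.io/api/core/v1"):
//
//	dc, err := NewDelegatingClient(NewDelegatingClientInput{
//		CacheReader:     cachedReader,
//		Client:          apiClient,
//		UncachedObjects: []Object{&corev1.Secret{}}, // always read Secrets straight from the API server
//	})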
type delegatingClient struct {
Reader
Writer
StatusClient
scheme *runtime.Scheme
mapper meta.RESTMapper
}
// Scheme returns the scheme this client is using.
func (d *delegatingClient) Scheme() *runtime.Scheme {
return d.scheme
}
// RESTMapper returns the rest mapper this client is using.
func (d *delegatingClient) RESTMapper() meta.RESTMapper {
return d.mapper
}
// delegatingReader forms a Reader that will cause Get and List requests for
// unstructured types to use the ClientReader while requests for any other type
// of object will use the CacheReader. This avoids accidentally caching the
// entire cluster in the common case of loading arbitrary unstructured objects
// (e.g. from OwnerReferences).
type delegatingReader struct {
CacheReader Reader
ClientReader Reader
uncachedGVKs map[schema.GroupVersionKind]struct{}
scheme *runtime.Scheme
cacheUnstructured bool
}
func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) {
gvk, err := apiutil.GVKForObject(obj, d.scheme)
if err != nil {
return false, err
}
// TODO: this is producing unsafe guesses that don't actually work,
// but it matches ~99% of the cases out there.
if meta.IsListType(obj) {
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
}
if _, isUncached := d.uncachedGVKs[gvk]; isUncached {
return true, nil
}
if !d.cacheUnstructured {
_, isUnstructured := obj.(*unstructured.Unstructured)
_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
return isUnstructured || isUnstructuredList, nil
}
return false, nil
}
// Get retrieves an obj for a given object key from the Kubernetes Cluster.
func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
if isUncached, err := d.shouldBypassCache(obj); err != nil {
return err
} else if isUncached {
return d.ClientReader.Get(ctx, key, obj, opts...)
}
return d.CacheReader.Get(ctx, key, obj, opts...)
}
// List retrieves list of objects for a given namespace and list options.
func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error {
if isUncached, err := d.shouldBypassCache(list); err != nil {
return err
} else if isUncached {
return d.ClientReader.List(ctx, list, opts...)
}
return d.CacheReader.List(ctx, list, opts...)
}

View File

@@ -0,0 +1,208 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
)
var _ Reader = &typedClient{}
var _ Writer = &typedClient{}
var _ StatusWriter = &typedClient{}
// typedClient is a client.Client that reads and writes directly from/to an API server.
// It lazily initializes new clients at the time they are used, and caches the client.
type typedClient struct {
cache *clientCache
paramCodec runtime.ParameterCodec
}
// Create implements client.Client.
func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
createOpts := &CreateOptions{}
createOpts.ApplyOptions(opts)
return o.Post().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Body(obj).
VersionedParams(createOpts.AsCreateOptions(), c.paramCodec).
Do(ctx).
Into(obj)
}
// Update implements client.Client.
func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
updateOpts := &UpdateOptions{}
updateOpts.ApplyOptions(opts)
return o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
Body(obj).
VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec).
Do(ctx).
Into(obj)
}
// Delete implements client.Client.
func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
deleteOpts := DeleteOptions{}
deleteOpts.ApplyOptions(opts)
return o.Delete().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
Body(deleteOpts.AsDeleteOptions()).
Do(ctx).
Error()
}
// DeleteAllOf implements client.Client.
func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
deleteAllOfOpts := DeleteAllOfOptions{}
deleteAllOfOpts.ApplyOptions(opts)
return o.Delete().
NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()).
Resource(o.resource()).
VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec).
Body(deleteAllOfOpts.AsDeleteOptions()).
Do(ctx).
Error()
}
// Patch implements client.Client.
func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
data, err := patch.Data(obj)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
return o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec).
Body(data).
Do(ctx).
Into(obj)
}
// Get implements client.Client.
func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
r, err := c.cache.getResource(obj)
if err != nil {
return err
}
getOpts := GetOptions{}
getOpts.ApplyOptions(opts)
return r.Get().
NamespaceIfScoped(key.Namespace, r.isNamespaced()).
Resource(r.resource()).
VersionedParams(getOpts.AsGetOptions(), c.paramCodec).
Name(key.Name).Do(ctx).Into(obj)
}
// List implements client.Client.
func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
r, err := c.cache.getResource(obj)
if err != nil {
return err
}
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
return r.Get().
NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
Resource(r.resource()).
VersionedParams(listOpts.AsListOptions(), c.paramCodec).
Do(ctx).
Into(obj)
}
// UpdateStatus used by StatusWriter to write status.
func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
// TODO(droot): examine the returned error and check if it needs to be
// wrapped to improve the UX.
// It would be nice to receive an error saying the object doesn't implement
// the status subresource so that users can check the CRD definition.
return o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
Body(obj).
VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec).
Do(ctx).
Into(obj)
}
// PatchStatus used by StatusWriter to write status.
func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
data, err := patch.Data(obj)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
return o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
Body(data).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec).
Do(ctx).
Into(obj)
}

View File

@@ -0,0 +1,275 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
var _ Reader = &unstructuredClient{}
var _ Writer = &unstructuredClient{}
var _ StatusWriter = &unstructuredClient{}
// unstructuredClient is a client.Client that reads and writes directly from/to an API server.
// It lazily initializes new clients at the time they are used, and caches the client.
type unstructuredClient struct {
cache *clientCache
paramCodec runtime.ParameterCodec
}
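// From a caller's perspective, unstructured reads look like this illustrative
// sketch ("c" and "ctx" are assumed to exist, and "schema" is assumed to be
// "k8s.io/apimachinery/pkg/runtime/schema"):
//
//	u := &unstructured.Unstructured{}
//	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
//	err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "web"}, u)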
// Create implements client.Client.
func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
u, ok := obj.(*unstructured.Unstructured)
if !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
gvk := u.GroupVersionKind()
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
createOpts := &CreateOptions{}
createOpts.ApplyOptions(opts)
result := o.Post().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Body(obj).
VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec).
Do(ctx).
Into(obj)
u.SetGroupVersionKind(gvk)
return result
}
// Update implements client.Client.
func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
u, ok := obj.(*unstructured.Unstructured)
if !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
gvk := u.GroupVersionKind()
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
updateOpts := UpdateOptions{}
updateOpts.ApplyOptions(opts)
result := o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
Body(obj).
VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec).
Do(ctx).
Into(obj)
u.SetGroupVersionKind(gvk)
return result
}
// Delete implements client.Client.
func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
deleteOpts := DeleteOptions{}
deleteOpts.ApplyOptions(opts)
return o.Delete().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
Body(deleteOpts.AsDeleteOptions()).
Do(ctx).
Error()
}
// DeleteAllOf implements client.Client.
func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
deleteAllOfOpts := DeleteAllOfOptions{}
deleteAllOfOpts.ApplyOptions(opts)
return o.Delete().
NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()).
Resource(o.resource()).
VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec).
Body(deleteAllOfOpts.AsDeleteOptions()).
Do(ctx).
Error()
}
// Patch implements client.Client.
func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
data, err := patch.Data(obj)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
return o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec).
Body(data).
Do(ctx).
Into(obj)
}
// Get implements client.Client.
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
u, ok := obj.(*unstructured.Unstructured)
if !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
gvk := u.GroupVersionKind()
getOpts := GetOptions{}
getOpts.ApplyOptions(opts)
r, err := uc.cache.getResource(obj)
if err != nil {
return err
}
result := r.Get().
NamespaceIfScoped(key.Namespace, r.isNamespaced()).
Resource(r.resource()).
VersionedParams(getOpts.AsGetOptions(), uc.paramCodec).
Name(key.Name).
Do(ctx).
Into(obj)
u.SetGroupVersionKind(gvk)
return result
}
// List implements client.Client.
func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
u, ok := obj.(*unstructured.UnstructuredList)
if !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
gvk := u.GroupVersionKind()
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
r, err := uc.cache.getResource(obj)
if err != nil {
return err
}
return r.Get().
NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
Resource(r.resource()).
VersionedParams(listOpts.AsListOptions(), uc.paramCodec).
Do(ctx).
Into(obj)
}
func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
return o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
Body(obj).
VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec).
Do(ctx).
Into(obj)
}
func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
u, ok := obj.(*unstructured.Unstructured)
if !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
gvk := u.GroupVersionKind()
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
data, err := patch.Data(obj)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
result := o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
Body(data).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec).
Do(ctx).
Into(u)
u.SetGroupVersionKind(gvk)
return result
}

View File

@@ -0,0 +1,114 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
)
// NewWithWatch returns a new WithWatch.
func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) {
client, err := newClient(config, options)
if err != nil {
return nil, err
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, err
}
return &watchingClient{client: client, dynamic: dynamicClient}, nil
}
type watchingClient struct {
*client
dynamic dynamic.Interface
}
func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) {
switch l := list.(type) {
case *unstructured.UnstructuredList:
return w.unstructuredWatch(ctx, l, opts...)
case *metav1.PartialObjectMetadataList:
return w.metadataWatch(ctx, l, opts...)
default:
return w.typedWatch(ctx, l, opts...)
}
}
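// Illustrative usage (a sketch; "cfg" and "ctx" are assumed to exist in the
// caller, and corev1 is assumed to be "k8s.io/api/core/v1"):
//
//	wc, err := NewWithWatch(cfg, Options{})
//	if err != nil { /* handle error */ }
//	w, err := wc.Watch(ctx, &corev1.PodList{}, InNamespace("default"))
//	if err != nil { /* handle error */ }
//	defer w.Stop()
//	for ev := range w.ResultChan() {
//		// ev.Type is Added/Modified/Deleted; ev.Object is the changed object.
//	}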
func (w *watchingClient) listOpts(opts ...ListOption) ListOptions {
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
if listOpts.Raw == nil {
listOpts.Raw = &metav1.ListOptions{}
}
listOpts.Raw.Watch = true
return listOpts
}
func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) {
gvk := obj.GroupVersionKind()
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := w.listOpts(opts...)
resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace)
if err != nil {
return nil, err
}
return resInt.Watch(ctx, *listOpts.AsListOptions())
}
func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) {
gvk := obj.GroupVersionKind()
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
r, err := w.client.unstructuredClient.cache.getResource(obj)
if err != nil {
return nil, err
}
listOpts := w.listOpts(opts...)
if listOpts.Namespace != "" && r.isNamespaced() {
return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions())
}
return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions())
}
func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) {
r, err := w.client.typedClient.cache.getResource(obj)
if err != nil {
return nil, err
}
listOpts := w.listOpts(opts...)
return r.Get().
NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
Resource(r.resource()).
VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec).
Watch(ctx)
}

View File

@@ -0,0 +1,270 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
"errors"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
)
// Cluster provides various methods to interact with a cluster.
type Cluster interface {
// SetFields will set any dependencies on an object for which the object has implemented the inject
// interface - e.g. inject.Client.
// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10.
SetFields(interface{}) error
// GetConfig returns an initialized Config
GetConfig() *rest.Config
// GetScheme returns an initialized Scheme
GetScheme() *runtime.Scheme
// GetClient returns a client configured with the Config. This client may
// not be a fully "direct" client -- it may read from a cache, for
// instance. See Options.NewClient for more information on how the default
// implementation works.
GetClient() client.Client
// GetFieldIndexer returns a client.FieldIndexer configured with the client
GetFieldIndexer() client.FieldIndexer
// GetCache returns a cache.Cache
GetCache() cache.Cache
// GetEventRecorderFor returns a new EventRecorder for the provided name
GetEventRecorderFor(name string) record.EventRecorder
// GetRESTMapper returns a RESTMapper
GetRESTMapper() meta.RESTMapper
// GetAPIReader returns a reader that will be configured to use the API server.
// This should be used sparingly and only when the client does not fit your
// use case.
GetAPIReader() client.Reader
// Start starts the cluster
Start(ctx context.Context) error
}
// Options are the possible options that can be configured for a Cluster.
type Options struct {
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources
// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always a better
// idea to pass your own scheme in. See the documentation in pkg/scheme for more information.
Scheme *runtime.Scheme
// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs
MapperProvider func(c *rest.Config) (meta.RESTMapper, error)
// Logger is the logger that should be used by this Cluster.
// If none is set, it defaults to log.Log global logger.
Logger logr.Logger
// SyncPeriod determines the minimum frequency at which watched resources are
// reconciled. A lower period will correct entropy more quickly, but reduce
// responsiveness to change if there are many watched resources. Change this
// value only if you know what you are doing. Defaults to 10 hours if unset.
// There will be a 10 percent jitter between the SyncPeriod of all controllers
// so that all controllers will not send list requests simultaneously.
SyncPeriod *time.Duration
// Namespace, if specified, restricts the manager's cache to watch objects in
// the desired namespace. Defaults to all namespaces.
//
// Note: If a namespace is specified, controllers can still Watch for a
// cluster-scoped resource (e.g. Node). For namespaced resources, the cache
// will only hold objects from the desired namespace.
Namespace string
// NewCache is the function that will create the cache to be used
// by the manager. If not set this will use the default new cache function.
NewCache cache.NewCacheFunc
// NewClient is the func that creates the client to be used by the manager.
// If not set this will create the default DelegatingClient that will
// use the cache for reads and the client for writes.
NewClient NewClientFunc
// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
// for the given objects.
ClientDisableCacheFor []client.Object
// DryRunClient specifies whether the client should be configured to enforce
// dryRun mode.
DryRunClient bool
// EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API
// Use this to customize the event correlator and spam filter
//
// Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers
// is shorter than the lifetime of your process.
EventBroadcaster record.EventBroadcaster
// makeBroadcaster allows deferring the creation of the broadcaster to
// avoid leaking goroutines if we never call Start on this manager. It also
// returns whether or not this is an "owned" broadcaster, and as such should be
// stopped with the manager.
makeBroadcaster intrec.EventBroadcasterProducer
// Dependency injection for testing
newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error)
}
// Option can be used to manipulate Options.
type Option func(*Options)
// New constructs a brand new cluster.
func New(config *rest.Config, opts ...Option) (Cluster, error) {
if config == nil {
return nil, errors.New("must specify Config")
}
options := Options{}
for _, opt := range opts {
opt(&options)
}
options = setOptionsDefaults(options)
// Create the mapper provider
mapper, err := options.MapperProvider(config)
if err != nil {
options.Logger.Error(err, "Failed to get API Group-Resources")
return nil, err
}
// Create the cache for the cached read client and for registering informers
cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace})
if err != nil {
return nil, err
}
clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper}
apiReader, err := client.New(config, clientOptions)
if err != nil {
return nil, err
}
writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...)
if err != nil {
return nil, err
}
if options.DryRunClient {
writeObj = client.NewDryRunClient(writeObj)
}
// Create the recorder provider to inject event recorders for the components.
// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific
// to the particular controller that it's being injected into, rather than the generic one used here.
recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster)
if err != nil {
return nil, err
}
return &cluster{
config: config,
scheme: options.Scheme,
cache: cache,
fieldIndexes: cache,
client: writeObj,
apiReader: apiReader,
recorderProvider: recorderProvider,
mapper: mapper,
logger: options.Logger,
}, nil
}
// setOptionsDefaults set default values for Options fields.
func setOptionsDefaults(options Options) Options {
// Use the Kubernetes client-go scheme if none is specified
if options.Scheme == nil {
options.Scheme = scheme.Scheme
}
if options.MapperProvider == nil {
options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) {
return apiutil.NewDynamicRESTMapper(c)
}
}
// Allow users to define how to create a new client
if options.NewClient == nil {
options.NewClient = DefaultNewClient
}
// Allow newCache to be mocked
if options.NewCache == nil {
options.NewCache = cache.New
}
// Allow newRecorderProvider to be mocked
if options.newRecorderProvider == nil {
options.newRecorderProvider = intrec.NewProvider
}
// This is duplicated with pkg/manager; we need it here to provide
// the user with an EventBroadcaster, and therefore also for the Leader election.
if options.EventBroadcaster == nil {
// defer initialization to avoid leaking by default
options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
return record.NewBroadcaster(), true
}
} else {
options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
return options.EventBroadcaster, false
}
}
if options.Logger.GetSink() == nil {
options.Logger = logf.RuntimeLog.WithName("cluster")
}
return options
}
// NewClientFunc allows a user to define how to create a client.
type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error)
// DefaultNewClient creates the default caching client.
func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
c, err := client.New(config, options)
if err != nil {
return nil, err
}
return client.NewDelegatingClient(client.NewDelegatingClientInput{
CacheReader: cache,
Client: c,
UncachedObjects: uncachedObjects,
})
}
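// exampleNewCluster is an illustrative sketch (not part of the upstream API):
// it constructs a Cluster restricted to a single namespace and starts it. The
// namespace value is made up for the example.
func exampleNewCluster(ctx context.Context, cfg *rest.Config) error {
    c, err := New(cfg, func(o *Options) {
        // Only cache objects from the "demo" namespace; everything else keeps
        // its default (client-go scheme, dynamic REST mapper, ...).
        o.Namespace = "demo"
    })
    if err != nil {
        return err
    }
    // Start blocks until ctx is cancelled; it starts the cache and stops the
    // owned event broadcaster on the way out.
    return c.Start(ctx)
}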

View File

@@ -0,0 +1,128 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)
type cluster struct {
// config is the rest.config used to talk to the apiserver. Required.
config *rest.Config
// scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults
// to scheme.Scheme.
scheme *runtime.Scheme
cache cache.Cache
// TODO(directxman12): Provide an escape hatch to get individual indexers
// client is the client injected into Controllers (and EventHandlers, Sources and Predicates).
client client.Client
// apiReader is the reader that will make requests to the api server and not the cache.
apiReader client.Reader
// fieldIndexes knows how to add field indexes over the Cache used by this controller,
// which can later be consumed via field selectors from the injected client.
fieldIndexes client.FieldIndexer
// recorderProvider is used to generate event recorders that will be injected into Controllers
// (and EventHandlers, Sources and Predicates).
recorderProvider *intrec.Provider
// mapper is used to map resources to kinds, and vice versa (REST mapping).
mapper meta.RESTMapper
// Logger is the logger that should be used by this manager.
// If none is set, it defaults to log.Log global logger.
logger logr.Logger
}
func (c *cluster) SetFields(i interface{}) error {
if _, err := inject.ConfigInto(c.config, i); err != nil {
return err
}
if _, err := inject.ClientInto(c.client, i); err != nil {
return err
}
if _, err := inject.APIReaderInto(c.apiReader, i); err != nil {
return err
}
if _, err := inject.SchemeInto(c.scheme, i); err != nil {
return err
}
if _, err := inject.CacheInto(c.cache, i); err != nil {
return err
}
if _, err := inject.MapperInto(c.mapper, i); err != nil {
return err
}
return nil
}
func (c *cluster) GetConfig() *rest.Config {
return c.config
}
func (c *cluster) GetClient() client.Client {
return c.client
}
func (c *cluster) GetScheme() *runtime.Scheme {
return c.scheme
}
func (c *cluster) GetFieldIndexer() client.FieldIndexer {
return c.fieldIndexes
}
func (c *cluster) GetCache() cache.Cache {
return c.cache
}
func (c *cluster) GetEventRecorderFor(name string) record.EventRecorder {
return c.recorderProvider.GetEventRecorderFor(name)
}
func (c *cluster) GetRESTMapper() meta.RESTMapper {
return c.mapper
}
func (c *cluster) GetAPIReader() client.Reader {
return c.apiReader
}
func (c *cluster) GetLogger() logr.Logger {
return c.logger
}
func (c *cluster) Start(ctx context.Context) error {
defer c.recorderProvider.Stop(ctx)
return c.cache.Start(ctx)
}

View File

@@ -0,0 +1,112 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"sync"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)
// ControllerManagerConfiguration defines the functions necessary to parse a config file
// and to configure the Options struct for the ctrl.Manager.
type ControllerManagerConfiguration interface {
runtime.Object
// Complete returns the versioned configuration
Complete() (v1alpha1.ControllerManagerConfigurationSpec, error)
}
// DeferredFileLoader is used to configure the decoder for loading controller
// runtime component config types.
type DeferredFileLoader struct {
ControllerManagerConfiguration
path string
scheme *runtime.Scheme
once sync.Once
err error
}
// File will set up the deferred file loader for the configuration.
// This will also configure the defaults for the loader if nothing is set.
//
// Defaults:
// * Path: "./config.yaml"
// * Kind: GenericControllerManagerConfiguration
func File() *DeferredFileLoader {
scheme := runtime.NewScheme()
utilruntime.Must(v1alpha1.AddToScheme(scheme))
return &DeferredFileLoader{
path: "./config.yaml",
ControllerManagerConfiguration: &v1alpha1.ControllerManagerConfiguration{},
scheme: scheme,
}
}
// Complete will use sync.Once to set the scheme.
func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) {
d.once.Do(d.loadFile)
if d.err != nil {
return v1alpha1.ControllerManagerConfigurationSpec{}, d.err
}
return d.ControllerManagerConfiguration.Complete()
}
// AtPath will set the path to load the file for the decoder.
func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader {
d.path = path
return d
}
// OfKind will set the type to be used for decoding the file into.
func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader {
d.ControllerManagerConfiguration = obj
return d
}
// InjectScheme will configure the scheme to be used for decoding the file.
func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error {
d.scheme = scheme
return nil
}
// loadFile is called from the sync.Once to load the file.
func (d *DeferredFileLoader) loadFile() {
if d.scheme == nil {
d.err = fmt.Errorf("scheme not supplied to controller configuration loader")
return
}
content, err := os.ReadFile(d.path)
if err != nil {
d.err = fmt.Errorf("could not read file at %s", d.path)
return
}
codecs := serializer.NewCodecFactory(d.scheme)
// Regardless of which external version the bytes are in,
// they will be decoded and converted into the internal version
if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil {
d.err = fmt.Errorf("could not decode file into runtime.Object")
}
}
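// exampleLoadConfig is an illustrative sketch (not part of the upstream API):
// it loads a component config file through the deferred loader. The path is
// made up; OfKind could be chained in as well to decode into a custom type.
func exampleLoadConfig() (v1alpha1.ControllerManagerConfigurationSpec, error) {
    // Complete triggers the actual read and decode exactly once (sync.Once).
    return File().AtPath("./testdata/config.yaml").Complete()
}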

View File

@@ -0,0 +1,25 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config contains functionality for interacting with ComponentConfig
// files
//
// # DeferredFileLoader
//
// This uses a deferred file decoding allowing you to chain your configuration
// setup. You can pass this into manager.Options#File and it will load your
// config.
package config

View File

@@ -0,0 +1,20 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 provides the ControllerManagerConfiguration used for
// configuring ctrl.Manager
// +kubebuilder:object:generate=true
package v1alpha1

View File

@@ -0,0 +1,37 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func init() {
SchemeBuilder.Register(&ControllerManagerConfiguration{})
}
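// Illustrative sketch (a separate consumer file, not part of this package):
// registering this group-version with a fresh scheme, e.g. before decoding
// ControllerManagerConfiguration objects.
package main

import (
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"

    "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)

func main() {
    s := runtime.NewScheme()
    // AddToScheme registers ControllerManagerConfiguration under
    // controller-runtime.sigs.k8s.io/v1alpha1.
    utilruntime.Must(v1alpha1.AddToScheme(s))
}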

View File

@@ -0,0 +1,157 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
configv1alpha1 "k8s.io/component-base/config/v1alpha1"
)
// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration.
type ControllerManagerConfigurationSpec struct {
// SyncPeriod determines the minimum frequency at which watched resources are
// reconciled. A lower period will correct entropy more quickly, but reduce
// responsiveness to change if there are many watched resources. Change this
// value only if you know what you are doing. Defaults to 10 hours if unset.
// There will be a 10 percent jitter between the SyncPeriod of all controllers
// so that all controllers will not send list requests simultaneously.
// +optional
SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"`
// LeaderElection is the LeaderElection config to be used when configuring
// the manager.Manager leader election
// +optional
LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
// CacheNamespace, if specified, restricts the manager's cache to watch objects in
// the desired namespace. Defaults to all namespaces.
//
// Note: If a namespace is specified, controllers can still Watch for a
// cluster-scoped resource (e.g. Node). For namespaced resources, the cache
// will only hold objects from the desired namespace.
// +optional
CacheNamespace string `json:"cacheNamespace,omitempty"`
// GracefulShutdownTimeout is the duration given to runnables to stop before the manager actually returns on stop.
// To disable graceful shutdown, set it to time.Duration(0).
// To use graceful shutdown without a timeout, set it to a negative duration, e.g. time.Duration(-1).
// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
GracefulShutdownTimeout *metav1.Duration `json:"gracefulShutDown,omitempty"`
// Controller contains global configuration options for controllers
// registered within this manager.
// +optional
Controller *ControllerConfigurationSpec `json:"controller,omitempty"`
// Metrics contains the controller metrics configuration
// +optional
Metrics ControllerMetrics `json:"metrics,omitempty"`
// Health contains the controller health configuration
// +optional
Health ControllerHealth `json:"health,omitempty"`
// Webhook contains the controllers webhook configuration
// +optional
Webhook ControllerWebhook `json:"webhook,omitempty"`
}
// ControllerConfigurationSpec defines the global configuration for
// controllers registered with the manager.
type ControllerConfigurationSpec struct {
// GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation
// allowed for that controller.
//
// When a controller is registered within this manager using the builder utilities,
// users have to specify the type the controller reconciles in the For(...) call.
// If the object's kind passed matches one of the keys in this map, the concurrency
// for that controller is set to the number specified.
//
// The key is expected to be consistent in form with GroupKind.String(),
// e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`.
//
// +optional
GroupKindConcurrency map[string]int `json:"groupKindConcurrency,omitempty"`
// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
// Defaults to 2 minutes if not set.
// +optional
CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"`
}
// ControllerMetrics defines the metrics configs.
type ControllerMetrics struct {
// BindAddress is the TCP address that the controller should bind to
// for serving prometheus metrics.
// It can be set to "0" to disable the metrics serving.
// +optional
BindAddress string `json:"bindAddress,omitempty"`
}
// ControllerHealth defines the health configs.
type ControllerHealth struct {
// HealthProbeBindAddress is the TCP address that the controller should bind to
// for serving health probes
// +optional
HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"`
// ReadinessEndpointName, defaults to "readyz"
// +optional
ReadinessEndpointName string `json:"readinessEndpointName,omitempty"`
// LivenessEndpointName, defaults to "healthz"
// +optional
LivenessEndpointName string `json:"livenessEndpointName,omitempty"`
}
// ControllerWebhook defines the webhook server for the controller.
type ControllerWebhook struct {
// Port is the port that the webhook server serves at.
// It is used to set webhook.Server.Port.
// +optional
Port *int `json:"port,omitempty"`
// Host is the hostname that the webhook server binds to.
// It is used to set webhook.Server.Host.
// +optional
Host string `json:"host,omitempty"`
// CertDir is the directory that contains the server key and certificate.
// If not set, the webhook server will look up the server key and certificate in
// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
// must be named tls.key and tls.crt, respectively.
// +optional
CertDir string `json:"certDir,omitempty"`
}
// +kubebuilder:object:root=true
// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API.
type ControllerManagerConfiguration struct {
metav1.TypeMeta `json:",inline"`
// ControllerManagerConfigurationSpec holds the configurations for controllers
ControllerManagerConfigurationSpec `json:",inline"`
}
// Complete returns the configuration for controller-runtime.
func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) {
return *c, nil
}
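// exampleSpec is an illustrative sketch (not part of the upstream API): it
// builds a spec in Go rather than loading it from a file. Note the
// GroupKind.String() key format for GroupKindConcurrency, e.g. "ReplicaSet.apps".
// The namespace and concurrency values are made up for the example.
func exampleSpec() ControllerManagerConfigurationSpec {
    syncPeriod := metav1.Duration{Duration: 10 * time.Hour}
    return ControllerManagerConfigurationSpec{
        SyncPeriod:     &syncPeriod,
        CacheNamespace: "demo",
        Controller: &ControllerConfigurationSpec{
            GroupKindConcurrency: map[string]int{
                "ReplicaSet.apps": 4, // run up to 4 concurrent reconciles
            },
        },
    }
}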

View File

@@ -0,0 +1,153 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/component-base/config/v1alpha1"
timex "time"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerConfigurationSpec) DeepCopyInto(out *ControllerConfigurationSpec) {
*out = *in
if in.GroupKindConcurrency != nil {
in, out := &in.GroupKindConcurrency, &out.GroupKindConcurrency
*out = make(map[string]int, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.CacheSyncTimeout != nil {
in, out := &in.CacheSyncTimeout, &out.CacheSyncTimeout
*out = new(timex.Duration)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigurationSpec.
func (in *ControllerConfigurationSpec) DeepCopy() *ControllerConfigurationSpec {
if in == nil {
return nil
}
out := new(ControllerConfigurationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth.
func (in *ControllerHealth) DeepCopy() *ControllerHealth {
if in == nil {
return nil
}
out := new(ControllerHealth)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration.
func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration {
if in == nil {
return nil
}
out := new(ControllerManagerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) {
*out = *in
if in.SyncPeriod != nil {
in, out := &in.SyncPeriod, &out.SyncPeriod
*out = new(v1.Duration)
**out = **in
}
if in.LeaderElection != nil {
in, out := &in.LeaderElection, &out.LeaderElection
*out = new(configv1alpha1.LeaderElectionConfiguration)
(*in).DeepCopyInto(*out)
}
if in.GracefulShutdownTimeout != nil {
in, out := &in.GracefulShutdownTimeout, &out.GracefulShutdownTimeout
*out = new(v1.Duration)
**out = **in
}
if in.Controller != nil {
in, out := &in.Controller, &out.Controller
*out = new(ControllerConfigurationSpec)
(*in).DeepCopyInto(*out)
}
out.Metrics = in.Metrics
out.Health = in.Health
in.Webhook.DeepCopyInto(&out.Webhook)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec.
func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec {
if in == nil {
return nil
}
out := new(ControllerManagerConfigurationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics.
func (in *ControllerMetrics) DeepCopy() *ControllerMetrics {
if in == nil {
return nil
}
out := new(ControllerMetrics)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) {
*out = *in
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(int)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook.
func (in *ControllerWebhook) DeepCopy() *ControllerWebhook {
if in == nil {
return nil
}
out := new(ControllerWebhook)
in.DeepCopyInto(out)
return out
}

View File

@@ -0,0 +1,155 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/internal/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/ratelimiter"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// Options are the arguments for creating a new Controller.
type Options struct {
// MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1.
MaxConcurrentReconciles int
// Reconciler reconciles an object
Reconciler reconcile.Reconciler
// RateLimiter is used to limit how frequently requests may be queued.
// Defaults to MaxOfRateLimiter which has both overall and per-item rate limiting.
// The overall is a token bucket and the per-item is exponential.
RateLimiter ratelimiter.RateLimiter
// LogConstructor is used to construct a logger used for this controller and passed
// to each reconciliation via the context field.
LogConstructor func(request *reconcile.Request) logr.Logger
// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
// Defaults to 2 minutes if not set.
CacheSyncTimeout time.Duration
// RecoverPanic indicates whether the panic caused by reconcile should be recovered.
RecoverPanic bool
}
// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests
// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item.
// Work typically involves reading and writing Kubernetes objects to make the system state match the state specified
// in the object Spec.
type Controller interface {
// Reconciler is called to reconcile an object by Namespace/Name
reconcile.Reconciler
// Watch takes events provided by a Source and uses the EventHandler to
// enqueue reconcile.Requests in response to the events.
//
// Watch may be provided one or more Predicates to filter events before
// they are given to the EventHandler. Events will be passed to the
// EventHandler if all provided Predicates evaluate to true.
Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error
// Start starts the controller. Start blocks until the context is closed or a
// controller has an error starting.
Start(ctx context.Context) error
// GetLogger returns this controller logger prefilled with basic information.
GetLogger() logr.Logger
}
// New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have
// been synced before the Controller is Started.
func New(name string, mgr manager.Manager, options Options) (Controller, error) {
c, err := NewUnmanaged(name, mgr, options)
if err != nil {
return nil, err
}
// Add the controller as a Manager component
return c, mgr.Add(c)
}
// NewUnmanaged returns a new controller without adding it to the manager. The
// caller is responsible for starting the returned controller.
func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller, error) {
if options.Reconciler == nil {
return nil, fmt.Errorf("must specify Reconciler")
}
if len(name) == 0 {
return nil, fmt.Errorf("must specify Name for Controller")
}
if options.LogConstructor == nil {
log := mgr.GetLogger().WithValues(
"controller", name,
)
options.LogConstructor = func(req *reconcile.Request) logr.Logger {
log := log
if req != nil {
log = log.WithValues(
"object", klog.KRef(req.Namespace, req.Name),
"namespace", req.Namespace, "name", req.Name,
)
}
return log
}
}
if options.MaxConcurrentReconciles <= 0 {
options.MaxConcurrentReconciles = 1
}
if options.CacheSyncTimeout == 0 {
options.CacheSyncTimeout = 2 * time.Minute
}
if options.RateLimiter == nil {
options.RateLimiter = workqueue.DefaultControllerRateLimiter()
}
// Inject dependencies into Reconciler
if err := mgr.SetFields(options.Reconciler); err != nil {
return nil, err
}
// Create controller with dependencies set
return &controller.Controller{
Do: options.Reconciler,
MakeQueue: func() workqueue.RateLimitingInterface {
return workqueue.NewNamedRateLimitingQueue(options.RateLimiter, name)
},
MaxConcurrentReconciles: options.MaxConcurrentReconciles,
CacheSyncTimeout: options.CacheSyncTimeout,
SetFields: mgr.SetFields,
Name: name,
LogConstructor: options.LogConstructor,
RecoverPanic: options.RecoverPanic,
}, nil
}
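// exampleSetup is an illustrative sketch (not part of the upstream API): it
// registers a controller with the manager and wires a single watch. The
// controller name is made up; src and r are assumed to be supplied by the caller.
func exampleSetup(mgr manager.Manager, src source.Source, r reconcile.Reconciler) error {
    // New registers the controller with the manager, so mgr.Start will sync
    // the shared caches and then run it.
    c, err := New("example-controller", mgr, Options{
        Reconciler:              r,
        MaxConcurrentReconciles: 2, // reconcile up to two objects at once
    })
    if err != nil {
        return err
    }
    // Enqueue a reconcile.Request for every event delivered by src.
    return c.Watch(src, &handler.EnqueueRequestForObject{})
}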

View File

@@ -0,0 +1,394 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllerutil
import (
"context"
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// AlreadyOwnedError is an error returned if the object you are trying to assign
// a controller reference to is already owned by another controller. Object is the
// subject and Owner is the reference for the current owner.
type AlreadyOwnedError struct {
Object metav1.Object
Owner metav1.OwnerReference
}
func (e *AlreadyOwnedError) Error() string {
return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name)
}
func newAlreadyOwnedError(obj metav1.Object, owner metav1.OwnerReference) *AlreadyOwnedError {
return &AlreadyOwnedError{
Object: obj,
Owner: owner,
}
}
// SetControllerReference sets owner as a Controller OwnerReference on controlled.
// This is used for garbage collection of the controlled object and for
// reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner).
// Since only one OwnerReference can be a controller, it returns an error if
// there is another OwnerReference with Controller flag set.
func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error {
// Validate the owner.
ro, ok := owner.(runtime.Object)
if !ok {
return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner)
}
if err := validateOwner(owner, controlled); err != nil {
return err
}
// Create a new controller ref.
gvk, err := apiutil.GVKForObject(ro, scheme)
if err != nil {
return err
}
ref := metav1.OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: owner.GetName(),
UID: owner.GetUID(),
BlockOwnerDeletion: pointer.BoolPtr(true),
Controller: pointer.BoolPtr(true),
}
// Return early with an error if the object is already controlled.
if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) {
return newAlreadyOwnedError(controlled, *existing)
}
// Update owner references and return.
upsertOwnerRef(ref, controlled)
return nil
}
// SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided.
// This allows you to declare that owner has a dependency on the object without specifying it as a controller.
// If a reference to the same object already exists, it'll be overwritten with the newly provided version.
func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error {
// Validate the owner.
ro, ok := owner.(runtime.Object)
if !ok {
return fmt.Errorf("%T is not a runtime.Object, cannot call SetOwnerReference", owner)
}
if err := validateOwner(owner, object); err != nil {
return err
}
// Create a new owner ref.
gvk, err := apiutil.GVKForObject(ro, scheme)
if err != nil {
return err
}
ref := metav1.OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
UID: owner.GetUID(),
Name: owner.GetName(),
}
// Update owner references and return.
upsertOwnerRef(ref, object)
return nil
}
func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) {
owners := object.GetOwnerReferences()
if idx := indexOwnerRef(owners, ref); idx == -1 {
owners = append(owners, ref)
} else {
owners[idx] = ref
}
object.SetOwnerReferences(owners)
}
// indexOwnerRef returns the index of the owner reference in the slice if found, or -1.
func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int {
for index, r := range ownerReferences {
if referSameObject(r, ref) {
return index
}
}
return -1
}
func validateOwner(owner, object metav1.Object) error {
ownerNs := owner.GetNamespace()
if ownerNs != "" {
objNs := object.GetNamespace()
if objNs == "" {
return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs)
}
if ownerNs != objNs {
return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace())
}
}
return nil
}
// Returns true if a and b point to the same object.
func referSameObject(a, b metav1.OwnerReference) bool {
aGV, err := schema.ParseGroupVersion(a.APIVersion)
if err != nil {
return false
}
bGV, err := schema.ParseGroupVersion(b.APIVersion)
if err != nil {
return false
}
return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name
}
// OperationResult is the action result of a CreateOrUpdate call.
type OperationResult string
const ( // They should complete the sentence "Deployment default/foo has been ..."
// OperationResultNone means that the resource has not been changed.
OperationResultNone OperationResult = "unchanged"
// OperationResultCreated means that a new resource is created.
OperationResultCreated OperationResult = "created"
// OperationResultUpdated means that an existing resource is updated.
OperationResultUpdated OperationResult = "updated"
// OperationResultUpdatedStatus means that an existing resource and its status is updated.
OperationResultUpdatedStatus OperationResult = "updatedStatus"
// OperationResultUpdatedStatusOnly means that only an existing status is updated.
OperationResultUpdatedStatusOnly OperationResult = "updatedStatusOnly"
)
// CreateOrUpdate creates or updates the given object in the Kubernetes
// cluster. The object's desired state must be reconciled with the existing
// state inside the passed in callback MutateFn.
//
// The MutateFn is called regardless of creating or updating an object.
//
// It returns the executed operation and an error.
func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) {
key := client.ObjectKeyFromObject(obj)
if err := c.Get(ctx, key, obj); err != nil {
if !apierrors.IsNotFound(err) {
return OperationResultNone, err
}
if err := mutate(f, key, obj); err != nil {
return OperationResultNone, err
}
if err := c.Create(ctx, obj); err != nil {
return OperationResultNone, err
}
return OperationResultCreated, nil
}
existing := obj.DeepCopyObject() //nolint
if err := mutate(f, key, obj); err != nil {
return OperationResultNone, err
}
if equality.Semantic.DeepEqual(existing, obj) {
return OperationResultNone, nil
}
if err := c.Update(ctx, obj); err != nil {
return OperationResultNone, err
}
return OperationResultUpdated, nil
}
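// exampleCreateOrUpdate is an illustrative sketch (not part of the upstream
// API): it ensures obj exists and carries a desired label. The label key and
// value are made up for the example.
func exampleCreateOrUpdate(ctx context.Context, c client.Client, obj client.Object) (OperationResult, error) {
    // The MutateFn reconciles the desired state on obj in place; it runs for
    // both the create and the update path.
    return CreateOrUpdate(ctx, c, obj, func() error {
        labels := obj.GetLabels()
        if labels == nil {
            labels = map[string]string{}
        }
        labels["example.domain/managed"] = "true"
        obj.SetLabels(labels)
        return nil
    })
}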
// CreateOrPatch creates or patches the given object in the Kubernetes
// cluster. The object's desired state must be reconciled with the before
// state inside the passed in callback MutateFn.
//
// The MutateFn is called regardless of creating or updating an object.
//
// It returns the executed operation and an error.
func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) {
key := client.ObjectKeyFromObject(obj)
if err := c.Get(ctx, key, obj); err != nil {
if !apierrors.IsNotFound(err) {
return OperationResultNone, err
}
if f != nil {
if err := mutate(f, key, obj); err != nil {
return OperationResultNone, err
}
}
if err := c.Create(ctx, obj); err != nil {
return OperationResultNone, err
}
return OperationResultCreated, nil
}
// Create patches for the object and its possible status.
objPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object))
statusPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object))
// Create a copy of the original object as well as converting that copy to
// unstructured data.
before, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject())
if err != nil {
return OperationResultNone, err
}
// Attempt to extract the status from the resource for easier comparison later
beforeStatus, hasBeforeStatus, err := unstructured.NestedFieldCopy(before, "status")
if err != nil {
return OperationResultNone, err
}
// If the resource contains a status then remove it from the unstructured
// copy to avoid unnecessary patching later.
if hasBeforeStatus {
unstructured.RemoveNestedField(before, "status")
}
// Mutate the original object.
if f != nil {
if err := mutate(f, key, obj); err != nil {
return OperationResultNone, err
}
}
// Convert the resource to unstructured to compare against our before copy.
after, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return OperationResultNone, err
}
// Attempt to extract the status from the resource for easier comparison later
afterStatus, hasAfterStatus, err := unstructured.NestedFieldCopy(after, "status")
if err != nil {
return OperationResultNone, err
}
// If the resource contains a status then remove it from the unstructured
// copy to avoid unnecessary patching later.
if hasAfterStatus {
unstructured.RemoveNestedField(after, "status")
}
result := OperationResultNone
if !reflect.DeepEqual(before, after) {
// Only issue a Patch if the before and after resources (minus status) differ
if err := c.Patch(ctx, obj, objPatch); err != nil {
return result, err
}
result = OperationResultUpdated
}
if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) {
// Only issue a Status Patch if the resource has a status and the beforeStatus
// and afterStatus copies differ
if result == OperationResultUpdated {
// If Status was replaced by Patch before, set it to afterStatus
objectAfterPatch, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return result, err
}
if err = unstructured.SetNestedField(objectAfterPatch, afterStatus, "status"); err != nil {
return result, err
}
// If Status was replaced by Patch before, restore patched structure to the obj
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(objectAfterPatch, obj); err != nil {
return result, err
}
}
if err := c.Status().Patch(ctx, obj, statusPatch); err != nil {
return result, err
}
if result == OperationResultUpdated {
result = OperationResultUpdatedStatus
} else {
result = OperationResultUpdatedStatusOnly
}
}
return result, nil
}
// mutate wraps a MutateFn and applies validation to its result.
func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error {
if err := f(); err != nil {
return err
}
if newKey := client.ObjectKeyFromObject(obj); key != newKey {
return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace")
}
return nil
}
// MutateFn is a function which mutates the existing object into its desired state.
type MutateFn func() error
// AddFinalizer accepts an Object and adds the provided finalizer if not present.
// It returns an indication of whether it updated the object's list of finalizers.
func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) {
f := o.GetFinalizers()
for _, e := range f {
if e == finalizer {
return false
}
}
o.SetFinalizers(append(f, finalizer))
return true
}
// RemoveFinalizer accepts an Object and removes the provided finalizer if present.
// It returns an indication of whether it updated the object's list of finalizers.
func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) {
f := o.GetFinalizers()
for i := 0; i < len(f); i++ {
if f[i] == finalizer {
f = append(f[:i], f[i+1:]...)
i--
finalizersUpdated = true
}
}
o.SetFinalizers(f)
return
}
// ContainsFinalizer checks whether an Object contains the provided finalizer.
func ContainsFinalizer(o client.Object, finalizer string) bool {
f := o.GetFinalizers()
for _, e := range f {
if e == finalizer {
return true
}
}
return false
}
// Object allows functions to work indistinctly with any resource that
// implements both Object interfaces.
//
// Deprecated: Use client.Object instead.
type Object = client.Object
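// exampleOwnershipAndFinalizers is an illustrative sketch (not part of the
// upstream API): it combines the helpers above. The finalizer name is made up
// for the example.
func exampleOwnershipAndFinalizers(owner, child client.Object, scheme *runtime.Scheme) error {
    // Mark child as controlled by owner so that Kubernetes garbage collection
    // deletes child together with owner, and the owner can be reconciled on
    // child changes (with a Watch + EnqueueRequestForOwner).
    if err := SetControllerReference(owner, child, scheme); err != nil {
        return err
    }
    // Idempotently add a cleanup finalizer; the caller would then persist the
    // owner with an Update call.
    _ = AddFinalizer(owner, "example.domain/cleanup")
    // Once external cleanup has completed, drop the finalizer again.
    if ContainsFinalizer(owner, "example.domain/cleanup") {
        RemoveFinalizer(owner, "example.domain/cleanup")
    }
    return nil
}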

View File

@@ -0,0 +1,20 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package controllerutil contains utility functions for working with and implementing Controllers.
*/
package controllerutil

View File

@@ -0,0 +1,25 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package controller provides types and functions for building Controllers. Controllers implement Kubernetes APIs.
# Creation
To create a new Controller, first create a manager.Manager and pass it to the controller.New function.
The Controller MUST be started by calling Manager.Start.
*/
package controller

View File

@@ -0,0 +1,40 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package conversion provides interface definitions that an API Type needs to
implement for it to be supported by the generic conversion webhook handler
defined under pkg/webhook/conversion.
*/
package conversion
import "k8s.io/apimachinery/pkg/runtime"
// Convertible defines the capability of a type to be convertible, i.e. it can be converted to/from a hub type.
type Convertible interface {
runtime.Object
ConvertTo(dst Hub) error
ConvertFrom(src Hub) error
}
// Hub marks that a given type is the hub type for conversion. This means that
// all conversions will first convert to the hub type, then convert from the hub
// type to the destination type. All types besides the hub type should implement
// Convertible.
type Hub interface {
runtime.Object
Hub()
}
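// Illustrative sketch (a separate consumer file, not part of this package): a
// made-up hub type and spoke type satisfying Hub and Convertible. DeepCopyObject
// is hand-rolled only to keep the sketch self-contained; real API types would
// use generated deepcopy code.
package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"

    "sigs.k8s.io/controller-runtime/pkg/conversion"
)

// WidgetV2 is the hub version: all conversions go through it.
type WidgetV2 struct {
    metav1.TypeMeta
    Schedule string
}

func (w *WidgetV2) DeepCopyObject() runtime.Object { c := *w; return &c }
func (*WidgetV2) Hub()                             {}

// WidgetV1 is a spoke version: it converts itself to and from the hub.
type WidgetV1 struct {
    metav1.TypeMeta
    Cron string
}

func (w *WidgetV1) DeepCopyObject() runtime.Object { c := *w; return &c }

func (src *WidgetV1) ConvertTo(dst conversion.Hub) error {
    dst.(*WidgetV2).Schedule = src.Cron
    return nil
}

func (dst *WidgetV1) ConvertFrom(src conversion.Hub) error {
    dst.Cron = src.(*WidgetV2).Schedule
    return nil
}

func main() {
    // Compile-time checks that the sketch satisfies the interfaces.
    var _ conversion.Hub = &WidgetV2{}
    var _ conversion.Convertible = &WidgetV1{}
}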

View File

@@ -0,0 +1,456 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envtest
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"time"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
)
// CRDInstallOptions are the options for installing CRDs.
type CRDInstallOptions struct {
// Scheme is used to determine if conversion webhooks should be enabled
// for a particular CRD / object.
//
// Conversion webhooks are going to be enabled if an object in the scheme
// implements Hub and Spoke conversions.
//
// If nil, scheme.Scheme is used.
Scheme *runtime.Scheme
// Paths is a list of paths to the directories or files containing CRDs
Paths []string
// CRDs is a list of CRDs to install
CRDs []*apiextensionsv1.CustomResourceDefinition
// ErrorIfPathMissing will cause an error if a Path does not exist
ErrorIfPathMissing bool
// MaxTime is the max time to wait
MaxTime time.Duration
// PollInterval is the interval to check
PollInterval time.Duration
// CleanUpAfterUse will cause the CRDs listed for installation to be
// uninstalled when terminating the test environment.
// Defaults to false.
CleanUpAfterUse bool
// WebhookOptions contains the conversion webhook information to install
// on the CRDs. This field is usually inherited by the EnvTest options.
//
// If you're passing this field manually, you need to make sure that
// the CA information and host port is filled in properly.
WebhookOptions WebhookInstallOptions
}
const defaultPollInterval = 100 * time.Millisecond
const defaultMaxWait = 10 * time.Second
// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory.
func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) {
defaultCRDOptions(&options)
// Read the CRD yamls into options.CRDs
if err := readCRDFiles(&options); err != nil {
return nil, fmt.Errorf("unable to read CRD files: %w", err)
}
if err := modifyConversionWebhooks(options.CRDs, options.Scheme, options.WebhookOptions); err != nil {
return nil, err
}
// Create the CRDs in the apiserver
if err := CreateCRDs(config, options.CRDs); err != nil {
return options.CRDs, fmt.Errorf("unable to create CRD instances: %w", err)
}
// Wait for the CRDs to appear as Resources in the apiserver
if err := WaitForCRDs(config, options.CRDs, options); err != nil {
return options.CRDs, fmt.Errorf("something went wrong waiting for CRDs to appear as API resources: %w", err)
}
return options.CRDs, nil
}
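// exampleInstallCRDs is an illustrative sketch (not part of the upstream API):
// it installs the CRDs found under a made-up test data directory and errors
// out if that directory is missing.
func exampleInstallCRDs(cfg *rest.Config) ([]*apiextensionsv1.CustomResourceDefinition, error) {
    return InstallCRDs(cfg, CRDInstallOptions{
        Paths:              []string{filepath.Join("config", "crd", "bases")},
        ErrorIfPathMissing: true,
        // MaxTime and PollInterval fall back to the 10s / 100ms defaults above.
    })
}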
// readCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs.
func readCRDFiles(options *CRDInstallOptions) error {
if len(options.Paths) > 0 {
crdList, err := renderCRDs(options)
if err != nil {
return err
}
options.CRDs = append(options.CRDs, crdList...)
}
return nil
}
// defaultCRDOptions sets the default values for the CRD install options.
func defaultCRDOptions(o *CRDInstallOptions) {
if o.Scheme == nil {
o.Scheme = scheme.Scheme
}
if o.MaxTime == 0 {
o.MaxTime = defaultMaxWait
}
if o.PollInterval == 0 {
o.PollInterval = defaultPollInterval
}
}
// WaitForCRDs waits for the CRDs to appear in discovery.
func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition, options CRDInstallOptions) error {
// Add each CRD to a map of GroupVersion to Resource
waitingFor := map[schema.GroupVersion]*sets.String{}
for _, crd := range crds {
gvs := []schema.GroupVersion{}
for _, version := range crd.Spec.Versions {
if version.Served {
gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name})
}
}
for _, gv := range gvs {
log.V(1).Info("adding API in waitlist", "GV", gv)
if _, found := waitingFor[gv]; !found {
// Initialize the set
waitingFor[gv] = &sets.String{}
}
// Add the Resource
waitingFor[gv].Insert(crd.Spec.Names.Plural)
}
}
// Poll until all resources are found in discovery
p := &poller{config: config, waitingFor: waitingFor}
return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll)
}
// poller checks if all the resources have been found in discovery, and returns false if not.
type poller struct {
// config is used to get discovery
config *rest.Config
// waitingFor is the map of resources keyed by group version that have not yet been found in discovery
waitingFor map[schema.GroupVersion]*sets.String
}
// poll checks if all the resources have been found in discovery, and returns false if not.
func (p *poller) poll() (done bool, err error) {
// Create a new clientset to avoid any client caching of discovery
cs, err := clientset.NewForConfig(p.config)
if err != nil {
return false, err
}
allFound := true
for gv, resources := range p.waitingFor {
// All resources found, do nothing
if resources.Len() == 0 {
delete(p.waitingFor, gv)
continue
}
// Get the Resources for this GroupVersion
// TODO: Maybe the controller-runtime client should be able to do this...
resourceList, err := cs.Discovery().ServerResourcesForGroupVersion(gv.Group + "/" + gv.Version)
if err != nil {
return false, nil //nolint:nilerr
}
// Remove each found resource from the resources set that we are waiting for
for _, resource := range resourceList.APIResources {
resources.Delete(resource.Name)
}
// Still waiting on some resources in this group version
if resources.Len() != 0 {
allFound = false
}
}
return allFound, nil
}
// UninstallCRDs uninstalls a collection of CRDs by reading the crd yaml files from a directory.
func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error {
// Read the CRD yamls into options.CRDs
if err := readCRDFiles(&options); err != nil {
return err
}
// Delete the CRDs from the apiserver
cs, err := client.New(config, client.Options{})
if err != nil {
return err
}
// Uninstall each CRD
for _, crd := range options.CRDs {
crd := crd
log.V(1).Info("uninstalling CRD", "crd", crd.GetName())
if err := cs.Delete(context.TODO(), crd); err != nil {
// If CRD is not found, we can consider success
if !apierrors.IsNotFound(err) {
return err
}
}
}
return nil
}
// CreateCRDs creates the CRDs.
func CreateCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition) error {
cs, err := client.New(config, client.Options{})
if err != nil {
return fmt.Errorf("unable to create client: %w", err)
}
// Create each CRD
for _, crd := range crds {
crd := crd
log.V(1).Info("installing CRD", "crd", crd.GetName())
existingCrd := crd.DeepCopy()
err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd)
switch {
case apierrors.IsNotFound(err):
if err := cs.Create(context.TODO(), crd); err != nil {
return fmt.Errorf("unable to create CRD %q: %w", crd.GetName(), err)
}
case err != nil:
return fmt.Errorf("unable to get CRD %q to check if it exists: %w", crd.GetName(), err)
default:
log.V(1).Info("CRD already exists, updating", "crd", crd.GetName())
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
if err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd); err != nil {
return err
}
crd.SetResourceVersion(existingCrd.GetResourceVersion())
return cs.Update(context.TODO(), crd)
}); err != nil {
return err
}
}
}
return nil
}
// renderCRDs iterates through options.Paths and extracts all CRD files.
func renderCRDs(options *CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) {
type GVKN struct {
GVK schema.GroupVersionKind
Name string
}
crds := map[GVKN]*apiextensionsv1.CustomResourceDefinition{}
for _, path := range options.Paths {
var (
err error
info os.FileInfo
files []string
filePath = path
)
// Return the error if ErrorIfPathMissing exists
if info, err = os.Stat(path); os.IsNotExist(err) {
if options.ErrorIfPathMissing {
return nil, err
}
continue
}
if !info.IsDir() {
filePath, files = filepath.Dir(path), []string{info.Name()}
} else {
entries, err := os.ReadDir(path)
if err != nil {
return nil, err
}
for _, e := range entries {
files = append(files, e.Name())
}
}
log.V(1).Info("reading CRDs from path", "path", path)
crdList, err := readCRDs(filePath, files)
if err != nil {
return nil, err
}
for i, crd := range crdList {
gvkn := GVKN{GVK: crd.GroupVersionKind(), Name: crd.GetName()}
if _, found := crds[gvkn]; found {
// Currently, we only print a log when there are duplicates. We may want to error out if that makes more sense.
log.Info("there are more than one CRD definitions with the same <Group, Version, Kind, Name>", "GVKN", gvkn)
}
// We always use the CRD definition that we found last.
crds[gvkn] = crdList[i]
}
}
// Convert the map to a list to return
res := []*apiextensionsv1.CustomResourceDefinition{}
for _, obj := range crds {
res = append(res, obj)
}
return res, nil
}
// modifyConversionWebhooks takes all the registered CustomResourceDefinitions and applies modifications
// to conditionally enable webhooks if the type is registered within the scheme.
func modifyConversionWebhooks(crds []*apiextensionsv1.CustomResourceDefinition, scheme *runtime.Scheme, webhookOptions WebhookInstallOptions) error {
if len(webhookOptions.LocalServingCAData) == 0 {
return nil
}
// Determine all registered convertible types.
convertibles := map[schema.GroupKind]struct{}{}
for gvk := range scheme.AllKnownTypes() {
obj, err := scheme.New(gvk)
if err != nil {
return err
}
if ok, err := conversion.IsConvertible(scheme, obj); ok && err == nil {
convertibles[gvk.GroupKind()] = struct{}{}
}
}
// generate host port.
hostPort, err := webhookOptions.generateHostPort()
if err != nil {
return err
}
url := pointer.StringPtr(fmt.Sprintf("https://%s/convert", hostPort))
for i := range crds {
// Continue if we're preserving unknown fields.
if crds[i].Spec.PreserveUnknownFields {
continue
}
// Continue if the GroupKind isn't registered as being convertible.
if _, ok := convertibles[schema.GroupKind{
Group: crds[i].Spec.Group,
Kind: crds[i].Spec.Names.Kind,
}]; !ok {
continue
}
if crds[i].Spec.Conversion == nil {
crds[i].Spec.Conversion = &apiextensionsv1.CustomResourceConversion{
Webhook: &apiextensionsv1.WebhookConversion{},
}
}
crds[i].Spec.Conversion.Strategy = apiextensionsv1.WebhookConverter
crds[i].Spec.Conversion.Webhook.ConversionReviewVersions = []string{"v1", "v1beta1"}
crds[i].Spec.Conversion.Webhook.ClientConfig = &apiextensionsv1.WebhookClientConfig{
Service: nil,
URL: url,
CABundle: webhookOptions.LocalServingCAData,
}
}
return nil
}
// readCRDs reads the CRDs from files and unmarshals them into structs.
func readCRDs(basePath string, files []string) ([]*apiextensionsv1.CustomResourceDefinition, error) {
var crds []*apiextensionsv1.CustomResourceDefinition
// Allowlist of file extensions that may contain CRDs
crdExts := sets.NewString(".json", ".yaml", ".yml")
for _, file := range files {
// Only parse allowlisted file types
if !crdExts.Has(filepath.Ext(file)) {
continue
}
// Unmarshal CRDs from file into structs
docs, err := readDocuments(filepath.Join(basePath, file))
if err != nil {
return nil, err
}
for _, doc := range docs {
crd := &apiextensionsv1.CustomResourceDefinition{}
if err = yaml.Unmarshal(doc, crd); err != nil {
return nil, err
}
if crd.Kind != "CustomResourceDefinition" || crd.Spec.Names.Kind == "" || crd.Spec.Group == "" {
continue
}
crds = append(crds, crd)
}
log.V(1).Info("read CRDs from file", "file", file)
}
return crds, nil
}
// readDocuments reads the individual YAML documents from a file.
func readDocuments(fp string) ([][]byte, error) {
b, err := os.ReadFile(fp)
if err != nil {
return nil, err
}
docs := [][]byte{}
reader := k8syaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(b)))
for {
// Read document
doc, err := reader.Read()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
return nil, err
}
docs = append(docs, doc)
}
return docs, nil
}

View File

@@ -0,0 +1,26 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package envtest provides libraries for integration testing by starting a local control plane
//
// Control plane binaries (etcd and kube-apiserver) are loaded by default from
// /usr/local/kubebuilder/bin. This can be overridden by setting the
// KUBEBUILDER_ASSETS environment variable, or by directly creating a
// ControlPlane for the Environment to use.
//
// Environment can also be configured to work with an existing cluster, and
// simply load CRDs and provide client configuration.
package envtest
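// A minimal usage sketch, assuming the standard envtest workflow (the CRD
// directory path is hypothetical):
//
//	testEnv := &envtest.Environment{
//		CRDDirectoryPaths:     []string{filepath.Join("config", "crd", "bases")},
//		ErrorIfCRDPathMissing: true,
//	}
//	cfg, err := testEnv.Start()
//	if err != nil {
//		// handle the error
//	}
//	defer func() { _ = testEnv.Stop() }()
//	c, err := client.New(cfg, client.Options{Scheme: scheme.Scheme})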

View File

@@ -0,0 +1,69 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envtest
import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/client-go/kubernetes/scheme"
)
var (
crdScheme = scheme.Scheme
)
// init is required to correctly initialize the crdScheme package variable.
func init() {
_ = apiextensionsv1.AddToScheme(crdScheme)
}
// mergePaths merges two string slices containing paths.
// This function makes no guarantees about order of the merged slice.
func mergePaths(s1, s2 []string) []string {
m := make(map[string]struct{})
for _, s := range s1 {
m[s] = struct{}{}
}
for _, s := range s2 {
m[s] = struct{}{}
}
merged := make([]string, len(m))
i := 0
for key := range m {
merged[i] = key
i++
}
return merged
}
// mergeCRDs merges two CRD slices using their names.
// This function makes no guarantees about order of the merged slice.
func mergeCRDs(s1, s2 []*apiextensionsv1.CustomResourceDefinition) []*apiextensionsv1.CustomResourceDefinition {
m := make(map[string]*apiextensionsv1.CustomResourceDefinition)
for _, obj := range s1 {
m[obj.GetName()] = obj
}
for _, obj := range s2 {
m[obj.GetName()] = obj
}
merged := make([]*apiextensionsv1.CustomResourceDefinition, len(m))
i := 0
for _, obj := range m {
merged[i] = obj.DeepCopy()
i++
}
return merged
}

View File

@@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package printer contains setup for a friendlier Ginkgo printer that's easier
// to parse by test automation.
package printer
import (
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
)
var _ ginkgo.Reporter = NewlineReporter{}
// NewlineReporter is a Reporter that prints a newline after the default Reporter output so that the results
// are correctly parsed by test automation.
// See issue https://github.com/jstemmer/go-junit-report/issues/31
type NewlineReporter struct{}
// SpecSuiteWillBegin implements ginkgo.Reporter.
func (NewlineReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
}
// BeforeSuiteDidRun implements ginkgo.Reporter.
func (NewlineReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {}
// AfterSuiteDidRun implements ginkgo.Reporter.
func (NewlineReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {}
// SpecWillRun implements ginkgo.Reporter.
func (NewlineReporter) SpecWillRun(specSummary *types.SpecSummary) {}
// SpecDidComplete implements ginkgo.Reporter.
func (NewlineReporter) SpecDidComplete(specSummary *types.SpecSummary) {}
// SpecSuiteDidEnd prints a newline between "35 Passed | 0 Failed | 0 Pending | 0 Skipped" and "--- PASS:".
func (NewlineReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { fmt.Printf("\n") }
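// Usage sketch with the Ginkgo v1 entrypoint (the suite name is illustrative):
//
//	func TestAPIs(t *testing.T) {
//		gomega.RegisterFailHandler(ginkgo.Fail)
//		ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Controller Suite",
//			[]ginkgo.Reporter{printer.NewlineReporter{}})
//	}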

View File

@@ -0,0 +1,109 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package printer
import (
"fmt"
"os"
"path/filepath"
"sync"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"k8s.io/apimachinery/pkg/util/sets"
)
var (
allRegisteredSuites = sets.String{}
allRegisteredSuitesLock = &sync.Mutex{}
)
type prowReporter struct {
junitReporter *reporters.JUnitReporter
}
// NewProwReporter returns a prowReporter that will write out a JUnit report when running in Prow
// and do nothing otherwise.
// WARNING: It seems this does not always properly fail the test runs when there are failures,
// see https://github.com/onsi/ginkgo/issues/706
// When using this, you must make sure to grep for failures in your JUnit XML files and fail the
// run if there are any.
func NewProwReporter(suiteName string) ginkgo.Reporter {
allRegisteredSuitesLock.Lock()
if allRegisteredSuites.Has(suiteName) {
panic(fmt.Sprintf("Suite named %q registered more than once", suiteName))
}
allRegisteredSuites.Insert(suiteName)
allRegisteredSuitesLock.Unlock()
if os.Getenv("CI") == "" {
return &prowReporter{}
}
artifactsDir := os.Getenv("ARTIFACTS")
if artifactsDir == "" {
return &prowReporter{}
}
path := filepath.Join(artifactsDir, fmt.Sprintf("junit_%s_%d.xml", suiteName, config.GinkgoConfig.ParallelNode))
return &prowReporter{
junitReporter: reporters.NewJUnitReporter(path),
}
}
func (pr *prowReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
if pr.junitReporter != nil {
pr.junitReporter.SpecSuiteWillBegin(config, summary)
}
}
// BeforeSuiteDidRun implements ginkgo.Reporter.
func (pr *prowReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
if pr.junitReporter != nil {
pr.junitReporter.BeforeSuiteDidRun(setupSummary)
}
}
// AfterSuiteDidRun implements ginkgo.Reporter.
func (pr *prowReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
if pr.junitReporter != nil {
pr.junitReporter.AfterSuiteDidRun(setupSummary)
}
}
// SpecWillRun implements ginkgo.Reporter.
func (pr *prowReporter) SpecWillRun(specSummary *types.SpecSummary) {
if pr.junitReporter != nil {
pr.junitReporter.SpecWillRun(specSummary)
}
}
// SpecDidComplete implements ginkgo.Reporter.
func (pr *prowReporter) SpecDidComplete(specSummary *types.SpecSummary) {
if pr.junitReporter != nil {
pr.junitReporter.SpecDidComplete(specSummary)
}
}
// SpecSuiteDidEnd prints a newline between "35 Passed | 0 Failed | 0 Pending | 0 Skipped" and "--- PASS:".
func (pr *prowReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
if pr.junitReporter != nil {
pr.junitReporter.SpecSuiteDidEnd(summary)
}
}
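// Usage sketch: the Prow reporter is typically registered alongside the
// NewlineReporter (the suite name is illustrative):
//
//	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Controller Suite",
//		[]ginkgo.Reporter{printer.NewlineReporter{}, printer.NewProwReporter("controller-suite")})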

View File

@@ -0,0 +1,374 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envtest
import (
"fmt"
"os"
"strings"
"time"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
)
var log = logf.RuntimeLog.WithName("test-env")
/*
It's possible to override some defaults by setting the following environment variables:
* USE_EXISTING_CLUSTER (boolean): if set to true, envtest will use an existing cluster
* TEST_ASSET_KUBE_APISERVER (string): path to the api-server binary to use
* TEST_ASSET_ETCD (string): path to the etcd binary to use
* TEST_ASSET_KUBECTL (string): path to the kubectl binary to use
* KUBEBUILDER_ASSETS (string): directory containing the binaries to use (api-server, etcd and kubectl). Defaults to /usr/local/kubebuilder/bin.
* KUBEBUILDER_CONTROLPLANE_START_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s.
* KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to stop. Defaults to 20s.
* KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT (boolean): if set to true, the control plane's stdout and stderr are attached to os.Stdout and os.Stderr
*/
const (
envUseExistingCluster = "USE_EXISTING_CLUSTER"
envStartTimeout = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT"
envStopTimeout = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT"
envAttachOutput = "KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT"
StartTimeout = 60
StopTimeout = 60
defaultKubebuilderControlPlaneStartTimeout = 20 * time.Second
defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second
)
// internal types we expose as part of our public API.
type (
// ControlPlane is the re-exported ControlPlane type from the internal testing package.
ControlPlane = controlplane.ControlPlane
// APIServer is the re-exported APIServer from the internal testing package.
APIServer = controlplane.APIServer
// Etcd is the re-exported Etcd from the internal testing package.
Etcd = controlplane.Etcd
// User represents a Kubernetes user to provision for auth purposes.
User = controlplane.User
// AuthenticatedUser represents a Kubernetes user that's been provisioned.
AuthenticatedUser = controlplane.AuthenticatedUser
// ListenAddr indicates the address and port that the API server should listen on.
ListenAddr = process.ListenAddr
// SecureServing contains details describing how the API server should serve
// its secure endpoint.
SecureServing = controlplane.SecureServing
// Authn is an authentication method that can be used with the control plane to
// provision users.
Authn = controlplane.Authn
// Arguments allows configuring a process's flags.
Arguments = process.Arguments
// Arg is a single flag with one or more values.
Arg = process.Arg
)
var (
// EmptyArguments constructs a new set of flags with nothing set.
//
// This is mostly useful for testing helper methods -- you'll want to call
// Configure on the APIServer (or etcd) to configure their arguments.
EmptyArguments = process.EmptyArguments
)
// Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and
// install extension APIs.
type Environment struct {
// ControlPlane is the ControlPlane including the apiserver and etcd
ControlPlane controlplane.ControlPlane
// Scheme is used to determine if conversion webhooks should be enabled
// for a particular CRD / object.
//
// Conversion webhooks are going to be enabled if an object in the scheme
// implements Hub and Spoke conversions.
//
// If nil, scheme.Scheme is used.
Scheme *runtime.Scheme
// Config can be used to talk to the apiserver. It's automatically
// populated if not set using the standard controller-runtime config
// loading.
Config *rest.Config
// CRDInstallOptions are the options for installing CRDs.
CRDInstallOptions CRDInstallOptions
// WebhookInstallOptions are the options for installing webhooks.
WebhookInstallOptions WebhookInstallOptions
// ErrorIfCRDPathMissing provides an interface for the underlying
// CRDInstallOptions.ErrorIfPathMissing. It prevents silent failures
// for missing CRD paths.
ErrorIfCRDPathMissing bool
// CRDs is a list of CRDs to install.
// If both this field and CRDs field in CRDInstallOptions are specified, the
// values are merged.
CRDs []*apiextensionsv1.CustomResourceDefinition
// CRDDirectoryPaths is a list of paths containing CRD yaml or json configs.
// If both this field and Paths field in CRDInstallOptions are specified, the
// values are merged.
CRDDirectoryPaths []string
// BinaryAssetsDirectory is the path where the binaries required for the envtest are
// located in the local environment. This field can be overridden by setting KUBEBUILDER_ASSETS.
BinaryAssetsDirectory string
// UseExistingCluster indicates that this environment should use an
// existing kubeconfig, instead of trying to stand up a new control plane.
// This is useful in cases that need aggregated API servers and the like.
UseExistingCluster *bool
// ControlPlaneStartTimeout is the maximum duration each controlplane component
// may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT
// environment variable or 20 seconds if unspecified
ControlPlaneStartTimeout time.Duration
// ControlPlaneStopTimeout is the maximum duration each controlplane component
// may take to stop. It defaults to the KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT
// environment variable or 20 seconds if unspecified
ControlPlaneStopTimeout time.Duration
// KubeAPIServerFlags is the set of flags passed while starting the api server.
//
// Deprecated: use ControlPlane.GetAPIServer().Configure() instead.
KubeAPIServerFlags []string
// AttachControlPlaneOutput indicates if control plane output will be attached to os.Stdout and os.Stderr.
// Enable this to get more visibility of the testing control plane.
// It respects the KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT environment variable.
AttachControlPlaneOutput bool
}
// Stop stops a running server.
// Previously installed CRDs, as listed in CRDInstallOptions.CRDs, will be uninstalled
// if CRDInstallOptions.CleanUpAfterUse is set to true.
func (te *Environment) Stop() error {
if te.CRDInstallOptions.CleanUpAfterUse {
if err := UninstallCRDs(te.Config, te.CRDInstallOptions); err != nil {
return err
}
}
if err := te.WebhookInstallOptions.Cleanup(); err != nil {
return err
}
if te.useExistingCluster() {
return nil
}
return te.ControlPlane.Stop()
}
// Start starts a local Kubernetes control plane and populates te.Config with a rest.Config for connecting to it.
func (te *Environment) Start() (*rest.Config, error) {
if te.useExistingCluster() {
log.V(1).Info("using existing cluster")
if te.Config == nil {
// we want to allow people to pass in their own config, so
// only load a config if it hasn't already been set.
log.V(1).Info("automatically acquiring client configuration")
var err error
te.Config, err = config.GetConfig()
if err != nil {
return nil, fmt.Errorf("unable to get configuration for existing cluster: %w", err)
}
}
} else {
apiServer := te.ControlPlane.GetAPIServer()
if len(apiServer.Args) == 0 { //nolint:staticcheck
// pass these through separately from above in case something like
// AddUser defaults APIServer.
//
// TODO(directxman12): if/when we feel like making a bigger
// breaking change here, just make APIServer and Etcd non-pointers
// in ControlPlane.
// NB(directxman12): we still pass these in so that things work if the
// user manually specifies them, but in most cases we expect them to
// be nil so that we use the new .Configure() logic.
apiServer.Args = te.KubeAPIServerFlags //nolint:staticcheck
}
if te.ControlPlane.Etcd == nil {
te.ControlPlane.Etcd = &controlplane.Etcd{}
}
if os.Getenv(envAttachOutput) == "true" {
te.AttachControlPlaneOutput = true
}
if apiServer.Out == nil && te.AttachControlPlaneOutput {
apiServer.Out = os.Stdout
}
if apiServer.Err == nil && te.AttachControlPlaneOutput {
apiServer.Err = os.Stderr
}
if te.ControlPlane.Etcd.Out == nil && te.AttachControlPlaneOutput {
te.ControlPlane.Etcd.Out = os.Stdout
}
if te.ControlPlane.Etcd.Err == nil && te.AttachControlPlaneOutput {
te.ControlPlane.Etcd.Err = os.Stderr
}
apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory)
te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory)
te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory)
if err := te.defaultTimeouts(); err != nil {
return nil, fmt.Errorf("failed to default controlplane timeouts: %w", err)
}
te.ControlPlane.Etcd.StartTimeout = te.ControlPlaneStartTimeout
te.ControlPlane.Etcd.StopTimeout = te.ControlPlaneStopTimeout
apiServer.StartTimeout = te.ControlPlaneStartTimeout
apiServer.StopTimeout = te.ControlPlaneStopTimeout
log.V(1).Info("starting control plane")
if err := te.startControlPlane(); err != nil {
return nil, fmt.Errorf("unable to start control plane itself: %w", err)
}
// Create the *rest.Config for creating new clients
baseConfig := &rest.Config{
// gotta go fast during tests -- we don't really care about overwhelming our test API server
QPS: 1000.0,
Burst: 2000.0,
}
adminInfo := User{Name: "admin", Groups: []string{"system:masters"}}
adminUser, err := te.ControlPlane.AddUser(adminInfo, baseConfig)
if err != nil {
return te.Config, fmt.Errorf("unable to provision admin user: %w", err)
}
te.Config = adminUser.Config()
}
// Set the default scheme if nil.
if te.Scheme == nil {
te.Scheme = scheme.Scheme
}
// Call PrepWithoutInstalling to setup certificates first
// and have them available to patch CRD conversion webhook as well.
if err := te.WebhookInstallOptions.PrepWithoutInstalling(); err != nil {
return nil, err
}
log.V(1).Info("installing CRDs")
te.CRDInstallOptions.CRDs = mergeCRDs(te.CRDInstallOptions.CRDs, te.CRDs)
te.CRDInstallOptions.Paths = mergePaths(te.CRDInstallOptions.Paths, te.CRDDirectoryPaths)
te.CRDInstallOptions.ErrorIfPathMissing = te.ErrorIfCRDPathMissing
te.CRDInstallOptions.WebhookOptions = te.WebhookInstallOptions
crds, err := InstallCRDs(te.Config, te.CRDInstallOptions)
if err != nil {
return te.Config, fmt.Errorf("unable to install CRDs onto control plane: %w", err)
}
te.CRDs = crds
log.V(1).Info("installing webhooks")
if err := te.WebhookInstallOptions.Install(te.Config); err != nil {
return nil, fmt.Errorf("unable to install webhooks onto control plane: %w", err)
}
return te.Config, nil
}
// AddUser provisions a new user for connecting to this Environment. The user will
// have the specified name & belong to the specified groups.
//
// If you specify a "base" config, the returned REST Config will contain those
// settings as well as any required by the authentication method. You can use
// this to easily specify options like QPS.
//
// This is effectively a convenience alias for ControlPlane.AddUser -- see that
// for more low-level details.
func (te *Environment) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) {
return te.ControlPlane.AddUser(user, baseConfig)
}
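// Illustrative sketch: provisioning an extra, lower-privilege user with its own
// rest.Config (the user name and groups here are hypothetical):
//
//	user, err := testEnv.AddUser(envtest.User{
//		Name:   "viewer",
//		Groups: []string{"system:authenticated"},
//	}, &rest.Config{QPS: 100, Burst: 200})
//	if err != nil {
//		// handle the error
//	}
//	viewerCfg := user.Config()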
func (te *Environment) startControlPlane() error {
numTries, maxRetries := 0, 5
var err error
for ; numTries < maxRetries; numTries++ {
// Start the control plane - retry if it fails
err = te.ControlPlane.Start()
if err == nil {
break
}
log.Error(err, "unable to start the controlplane", "tries", numTries)
}
if numTries == maxRetries {
return fmt.Errorf("failed to start the controlplane. retried %d times: %w", numTries, err)
}
return nil
}
func (te *Environment) defaultTimeouts() error {
var err error
if te.ControlPlaneStartTimeout == 0 {
if envVal := os.Getenv(envStartTimeout); envVal != "" {
te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal)
if err != nil {
return err
}
} else {
te.ControlPlaneStartTimeout = defaultKubebuilderControlPlaneStartTimeout
}
}
if te.ControlPlaneStopTimeout == 0 {
if envVal := os.Getenv(envStopTimeout); envVal != "" {
te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal)
if err != nil {
return err
}
} else {
te.ControlPlaneStopTimeout = defaultKubebuilderControlPlaneStopTimeout
}
}
return nil
}
func (te *Environment) useExistingCluster() bool {
if te.UseExistingCluster == nil {
return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true"
}
return *te.UseExistingCluster
}
// DefaultKubeAPIServerFlags exposes the default args for the APIServer so that
// you can use those to append your own additional arguments.
//
// Deprecated: use APIServer.Configure() instead.
var DefaultKubeAPIServerFlags = controlplane.APIServerDefaultArgs //nolint:staticcheck
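// Illustrative sketch of the replacement API, assuming the Arguments/Configure
// helpers re-exported above (the flag and value are hypothetical):
//
//	testEnv := &envtest.Environment{}
//	testEnv.ControlPlane.GetAPIServer().Configure().
//		Append("feature-gates", "SomeAlphaFeature=true")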

View File

@@ -0,0 +1,433 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envtest
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"time"
admissionv1 "k8s.io/api/admissionregistration/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/yaml"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
)
// WebhookInstallOptions are the options for installing mutating or validating webhooks.
type WebhookInstallOptions struct {
// Paths is a list of paths to the directories or files containing the mutating or validating webhooks yaml or json configs.
Paths []string
// MutatingWebhooks is a list of MutatingWebhookConfigurations to install
MutatingWebhooks []*admissionv1.MutatingWebhookConfiguration
// ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install
ValidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration
// IgnoreErrorIfPathMissing, when set to true, ignores an error if a path in Paths does not exist
IgnoreErrorIfPathMissing bool
// LocalServingHost is the host for serving webhooks on.
// It will be automatically populated.
LocalServingHost string
// LocalServingPort is the allocated port for serving webhooks on.
// It will be automatically populated by a random available local port.
LocalServingPort int
// LocalServingCertDir is the allocated directory for serving certificates.
// It will be automatically populated by a local temp dir.
LocalServingCertDir string
// CAData is the CA that can be used to trust the serving certificates in LocalServingCertDir.
LocalServingCAData []byte
// LocalServingHostExternalName is the hostname to use to reach the webhook server.
LocalServingHostExternalName string
// MaxTime is the maximum time to wait for the webhook configurations to become available
MaxTime time.Duration
// PollInterval is the interval at which to poll for the webhook configurations
PollInterval time.Duration
}
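// Illustrative sketch: pointing the test environment at webhook manifests so that
// their client configs are rewritten to target the locally served webhook server
// (the directory path is hypothetical):
//
//	testEnv := &envtest.Environment{
//		WebhookInstallOptions: envtest.WebhookInstallOptions{
//			Paths: []string{filepath.Join("config", "webhook")},
//		},
//	}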
// ModifyWebhookDefinitions modifies webhook definitions by:
// - applying the CABundle from the locally generated CA
// - replacing any service-based client config with a direct URL to the local webhook server.
func (o *WebhookInstallOptions) ModifyWebhookDefinitions() error {
caData := o.LocalServingCAData
// generate host port.
hostPort, err := o.generateHostPort()
if err != nil {
return err
}
for i := range o.MutatingWebhooks {
for j := range o.MutatingWebhooks[i].Webhooks {
updateClientConfig(&o.MutatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData)
}
}
for i := range o.ValidatingWebhooks {
for j := range o.ValidatingWebhooks[i].Webhooks {
updateClientConfig(&o.ValidatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData)
}
}
return nil
}
func updateClientConfig(cc *admissionv1.WebhookClientConfig, hostPort string, caData []byte) {
cc.CABundle = caData
if cc.Service != nil && cc.Service.Path != nil {
url := fmt.Sprintf("https://%s/%s", hostPort, *cc.Service.Path)
cc.URL = &url
cc.Service = nil
}
}
func (o *WebhookInstallOptions) generateHostPort() (string, error) {
if o.LocalServingPort == 0 {
port, host, err := addr.Suggest(o.LocalServingHost)
if err != nil {
return "", fmt.Errorf("unable to grab random port for serving webhooks on: %w", err)
}
o.LocalServingPort = port
o.LocalServingHost = host
}
host := o.LocalServingHostExternalName
if host == "" {
host = o.LocalServingHost
}
return net.JoinHostPort(host, fmt.Sprintf("%d", o.LocalServingPort)), nil
}
// PrepWithoutInstalling does the setup parts of Install (populating host-port,
// setting up CAs, etc), without actually trying to do anything with webhook
// definitions. This is largely useful for internal testing of
// controller-runtime, where we need a random host-port & caData for webhook
// tests, but may be useful in similar scenarios.
func (o *WebhookInstallOptions) PrepWithoutInstalling() error {
if err := o.setupCA(); err != nil {
return err
}
if err := parseWebhook(o); err != nil {
return err
}
return o.ModifyWebhookDefinitions()
}
// Install installs specified webhooks to the API server.
func (o *WebhookInstallOptions) Install(config *rest.Config) error {
if len(o.LocalServingCAData) == 0 {
if err := o.PrepWithoutInstalling(); err != nil {
return err
}
}
if err := createWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks); err != nil {
return err
}
return WaitForWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks, *o)
}
// Cleanup cleans up cert directories.
func (o *WebhookInstallOptions) Cleanup() error {
if o.LocalServingCertDir != "" {
return os.RemoveAll(o.LocalServingCertDir)
}
return nil
}
// WaitForWebhooks waits for the Webhooks to be available through API server.
func WaitForWebhooks(config *rest.Config,
mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration,
validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration,
options WebhookInstallOptions) error {
waitingFor := map[schema.GroupVersionKind]*sets.String{}
for _, hook := range mutatingWebhooks {
h := hook
gvk, err := apiutil.GVKForObject(h, scheme.Scheme)
if err != nil {
return fmt.Errorf("unable to get gvk for MutatingWebhookConfiguration %s: %w", hook.GetName(), err)
}
if _, ok := waitingFor[gvk]; !ok {
waitingFor[gvk] = &sets.String{}
}
waitingFor[gvk].Insert(h.GetName())
}
for _, hook := range validatingWebhooks {
h := hook
gvk, err := apiutil.GVKForObject(h, scheme.Scheme)
if err != nil {
return fmt.Errorf("unable to get gvk for ValidatingWebhookConfiguration %s: %w", hook.GetName(), err)
}
if _, ok := waitingFor[gvk]; !ok {
waitingFor[gvk] = &sets.String{}
}
waitingFor[gvk].Insert(hook.GetName())
}
// Poll until all webhook configurations are found in the API server
p := &webhookPoller{config: config, waitingFor: waitingFor}
return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll)
}
// webhookPoller checks if the webhook configurations have been created in the API server, and returns false if not.
type webhookPoller struct {
// config is used to create the client
config *rest.Config
// waitingFor is the map of webhook configuration names, keyed by GroupVersionKind, that have not yet been found in the API server
waitingFor map[schema.GroupVersionKind]*sets.String
}
// poll checks if all the webhook configurations have been found in the API server, and returns false if not.
func (p *webhookPoller) poll() (done bool, err error) {
// Create a new client on every poll to avoid any caching
c, err := client.New(p.config, client.Options{})
if err != nil {
return false, err
}
allFound := true
for gvk, names := range p.waitingFor {
if names.Len() == 0 {
delete(p.waitingFor, gvk)
continue
}
for _, name := range names.List() {
var obj = &unstructured.Unstructured{}
obj.SetGroupVersionKind(gvk)
err := c.Get(context.Background(), client.ObjectKey{
Namespace: "",
Name: name,
}, obj)
if err == nil {
names.Delete(name)
}
if apierrors.IsNotFound(err) {
allFound = false
}
if err != nil {
return false, err
}
}
}
return allFound, nil
}
// setupCA creates a CA and serving certificates for testing and writes them to disk.
func (o *WebhookInstallOptions) setupCA() error {
hookCA, err := certs.NewTinyCA()
if err != nil {
return fmt.Errorf("unable to set up webhook CA: %w", err)
}
names := []string{"localhost", o.LocalServingHost, o.LocalServingHostExternalName}
hookCert, err := hookCA.NewServingCert(names...)
if err != nil {
return fmt.Errorf("unable to set up webhook serving certs: %w", err)
}
localServingCertsDir, err := os.MkdirTemp("", "envtest-serving-certs-")
o.LocalServingCertDir = localServingCertsDir
if err != nil {
return fmt.Errorf("unable to create directory for webhook serving certs: %w", err)
}
certData, keyData, err := hookCert.AsBytes()
if err != nil {
return fmt.Errorf("unable to marshal webhook serving certs: %w", err)
}
if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { //nolint:gosec
return fmt.Errorf("unable to write webhook serving cert to disk: %w", err)
}
if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { //nolint:gosec
return fmt.Errorf("unable to write webhook serving key to disk: %w", err)
}
o.LocalServingCAData = certData
return err
}
func createWebhooks(config *rest.Config, mutHooks []*admissionv1.MutatingWebhookConfiguration, valHooks []*admissionv1.ValidatingWebhookConfiguration) error {
cs, err := client.New(config, client.Options{})
if err != nil {
return err
}
// Create each webhook
for _, hook := range mutHooks {
hook := hook
log.V(1).Info("installing mutating webhook", "webhook", hook.GetName())
if err := ensureCreated(cs, hook); err != nil {
return err
}
}
for _, hook := range valHooks {
hook := hook
log.V(1).Info("installing validating webhook", "webhook", hook.GetName())
if err := ensureCreated(cs, hook); err != nil {
return err
}
}
return nil
}
// ensureCreated creates the object, or updates it if it already exists in the cluster.
func ensureCreated(cs client.Client, obj client.Object) error {
existing := obj.DeepCopyObject().(client.Object)
err := cs.Get(context.Background(), client.ObjectKey{Name: obj.GetName()}, existing)
switch {
case apierrors.IsNotFound(err):
if err := cs.Create(context.Background(), obj); err != nil {
return err
}
case err != nil:
return err
default:
log.V(1).Info("Webhook configuration already exists, updating", "webhook", obj.GetName())
obj.SetResourceVersion(existing.GetResourceVersion())
if err := cs.Update(context.Background(), obj); err != nil {
return err
}
}
return nil
}
// parseWebhook reads the directories or files of Webhooks in options.Paths and adds the Webhook structs to options.
func parseWebhook(options *WebhookInstallOptions) error {
if len(options.Paths) > 0 {
for _, path := range options.Paths {
_, err := os.Stat(path)
if options.IgnoreErrorIfPathMissing && os.IsNotExist(err) {
continue // skip this path
}
if !options.IgnoreErrorIfPathMissing && os.IsNotExist(err) {
return err // treat missing path as error
}
mutHooks, valHooks, err := readWebhooks(path)
if err != nil {
return err
}
options.MutatingWebhooks = append(options.MutatingWebhooks, mutHooks...)
options.ValidatingWebhooks = append(options.ValidatingWebhooks, valHooks...)
}
}
return nil
}
// readWebhooks reads the Webhooks from files and unmarshals them into structs,
// returning slices of mutating and validating webhook configurations.
func readWebhooks(path string) ([]*admissionv1.MutatingWebhookConfiguration, []*admissionv1.ValidatingWebhookConfiguration, error) {
// Get the webhook files
var files []string
var err error
log.V(1).Info("reading Webhooks from path", "path", path)
info, err := os.Stat(path)
if err != nil {
return nil, nil, err
}
if !info.IsDir() {
path, files = filepath.Dir(path), []string{info.Name()}
} else {
entries, err := os.ReadDir(path)
if err != nil {
return nil, nil, err
}
for _, e := range entries {
files = append(files, e.Name())
}
}
// file extensions that may contain Webhooks
resourceExtensions := sets.NewString(".json", ".yaml", ".yml")
var mutHooks []*admissionv1.MutatingWebhookConfiguration
var valHooks []*admissionv1.ValidatingWebhookConfiguration
for _, file := range files {
// Only parse allowlisted file types
if !resourceExtensions.Has(filepath.Ext(file)) {
continue
}
// Unmarshal Webhooks from file into structs
docs, err := readDocuments(filepath.Join(path, file))
if err != nil {
return nil, nil, err
}
for _, doc := range docs {
var generic metav1.PartialObjectMetadata
if err = yaml.Unmarshal(doc, &generic); err != nil {
return nil, nil, err
}
const (
admissionregv1 = "admissionregistration.k8s.io/v1"
)
switch {
case generic.Kind == "MutatingWebhookConfiguration":
if generic.APIVersion != admissionregv1 {
return nil, nil, fmt.Errorf("only v1 is supported right now for MutatingWebhookConfiguration (name: %s)", generic.Name)
}
hook := &admissionv1.MutatingWebhookConfiguration{}
if err := yaml.Unmarshal(doc, hook); err != nil {
return nil, nil, err
}
mutHooks = append(mutHooks, hook)
case generic.Kind == "ValidatingWebhookConfiguration":
if generic.APIVersion != admissionregv1 {
return nil, nil, fmt.Errorf("only v1 is supported right now for ValidatingWebhookConfiguration (name: %s)", generic.Name)
}
hook := &admissionv1.ValidatingWebhookConfiguration{}
if err := yaml.Unmarshal(doc, hook); err != nil {
return nil, nil, err
}
valHooks = append(valHooks, hook)
default:
continue
}
}
log.V(1).Info("read webhooks from file", "file", file)
}
return mutHooks, valHooks, nil
}

28
vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package event contains the definitions for the Event types produced by source.Sources and transformed into
reconcile.Requests by handler.EventHandler.
You should rarely need to work with these directly -- instead, use Controller.Watch with
source.Sources and handler.EventHandlers.
Events generally contain both a full runtime.Object that caused the event and
a direct handle to that object's metadata. This saves a lot of typecasting in
code that works with Events.
*/
package event

View File

@@ -0,0 +1,55 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package event
import "sigs.k8s.io/controller-runtime/pkg/client"
// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated
// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler.
type CreateEvent struct {
// Object is the object from the event
Object client.Object
}
// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated
// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler.
type UpdateEvent struct {
// ObjectOld is the object from the event
ObjectOld client.Object
// ObjectNew is the object from the event
ObjectNew client.Object
}
// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated
// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler.
type DeleteEvent struct {
// Object is the object from the event
Object client.Object
// DeleteStateUnknown is true if the Delete event was missed but we identified the object
// as having been deleted.
DeleteStateUnknown bool
}
// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster).
// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by a
// handler.EventHandler.
type GenericEvent struct {
// Object is the object from the event
Object client.Object
}
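// Illustrative sketch: GenericEvents are commonly produced by test or application
// code and fed to a controller through a channel-based source (the channel, object
// type, and wiring below are placeholders):
//
//	events := make(chan event.GenericEvent)
//	// somewhere that reacts to an external system:
//	events <- event.GenericEvent{Object: &corev1.ConfigMap{}}
//	// and wired into a controller, e.g.:
//	//	Watches(&source.Channel{Source: events}, &handler.EnqueueRequestForObject{})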

View File

@@ -0,0 +1,38 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package handler defines EventHandlers that enqueue reconcile.Requests in response to Create, Update, Deletion Events
observed from Watching Kubernetes APIs. Users should provide a source.Source and handler.EventHandler to
Controller.Watch in order to generate and enqueue reconcile.Request work items.
Generally, the following premade event handlers should be sufficient for most use cases:
EventHandlers:
EnqueueRequestForObject - Enqueues a reconcile.Request containing the Name and Namespace of the object in the Event. This will
cause the object that was the source of the Event (e.g. the created / deleted / updated object) to be
reconciled.
EnqueueRequestForOwner - Enqueues a reconcile.Request containing the Name and Namespace of the Owner of the object in the Event.
This will cause owner of the object that was the source of the Event (e.g. the owner object that created the object)
to be reconciled.
EnqueueRequestsFromMapFunc - Enqueues reconcile.Requests resulting from a user provided transformation function run against the
object in the Event. This will cause an arbitrary collection of objects (defined from a transformation of the
source object) to be reconciled.
*/
package handler
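// Illustrative sketch of the most common wiring through the builder (the object
// types, the nodeToDeployments map function, and the reconciler are placeholders):
//
//	err := ctrl.NewControllerManagedBy(mgr).
//		For(&appsv1.Deployment{}).   // EnqueueRequestForObject under the hood
//		Owns(&appsv1.ReplicaSet{}).  // EnqueueRequestForOwner under the hood
//		Watches(&source.Kind{Type: &corev1.Node{}},
//			handler.EnqueueRequestsFromMapFunc(nodeToDeployments)).
//		Complete(reconciler)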

View File

@@ -0,0 +1,90 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/event"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var enqueueLog = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForObject")
type empty struct{}
var _ EventHandler = &EnqueueRequestForObject{}
// EnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event.
// (e.g. the created / deleted / updated object's Name and Namespace). handler.EnqueueRequestForObject is used by almost all
// Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource.
type EnqueueRequestForObject struct{}
// Create implements EventHandler.
func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
if evt.Object == nil {
enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt)
return
}
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: evt.Object.GetName(),
Namespace: evt.Object.GetNamespace(),
}})
}
// Update implements EventHandler.
func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
switch {
case evt.ObjectNew != nil:
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: evt.ObjectNew.GetName(),
Namespace: evt.ObjectNew.GetNamespace(),
}})
case evt.ObjectOld != nil:
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: evt.ObjectOld.GetName(),
Namespace: evt.ObjectOld.GetNamespace(),
}})
default:
enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt)
}
}
// Delete implements EventHandler.
func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
if evt.Object == nil {
enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt)
return
}
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: evt.Object.GetName(),
Namespace: evt.Object.GetNamespace(),
}})
}
// Generic implements EventHandler.
func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
if evt.Object == nil {
enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt)
return
}
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: evt.Object.GetName(),
Namespace: evt.Object.GetNamespace(),
}})
}

View File

@@ -0,0 +1,97 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)
// MapFunc is the signature required for enqueueing requests from a generic function.
// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler.
type MapFunc func(client.Object) []reconcile.Request
// EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection
// of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects
// defined by some user specified transformation of the source Event. (e.g. trigger Reconciler for a set of objects
// in response to a cluster resize event caused by adding or deleting a Node)
//
// EnqueueRequestsFromMapFunc is frequently used to fan-out updates from one object to one or more other
// objects of a differing type.
//
// For UpdateEvents which contain both a new and old object, the transformation function is run on both
// objects and both sets of Requests are enqueued.
func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler {
return &enqueueRequestsFromMapFunc{
toRequests: fn,
}
}
var _ EventHandler = &enqueueRequestsFromMapFunc{}
type enqueueRequestsFromMapFunc struct {
// Mapper transforms the argument into a slice of keys to be reconciled
toRequests MapFunc
}
// Create implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.mapAndEnqueue(q, evt.Object, reqs)
}
// Update implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.mapAndEnqueue(q, evt.ObjectOld, reqs)
e.mapAndEnqueue(q, evt.ObjectNew, reqs)
}
// Delete implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.mapAndEnqueue(q, evt.Object, reqs)
}
// Generic implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.mapAndEnqueue(q, evt.Object, reqs)
}
func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) {
for _, req := range e.toRequests(object) {
_, ok := reqs[req]
if !ok {
q.Add(req)
reqs[req] = empty{}
}
}
}
// EnqueueRequestsFromMapFunc can inject fields into the mapper.
// InjectFunc implements inject.Injector.
func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error {
if f == nil {
return nil
}
return f(e.toRequests)
}
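// Illustrative sketch of a MapFunc (the derived object name is made up):
//
//	h := handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
//		return []reconcile.Request{
//			{NamespacedName: types.NamespacedName{
//				Namespace: obj.GetNamespace(),
//				Name:      obj.GetName() + "-config", // hypothetical derived name
//			}},
//		}
//	})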

View File

@@ -0,0 +1,189 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/event"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)
var _ EventHandler = &EnqueueRequestForOwner{}
var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner")
// EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created
// the object that was the source of the Event.
//
// If a ReplicaSet creates Pods, users may reconcile the ReplicaSet in response to Pod Events using:
//
// - a source.Kind Source with Type of Pod.
//
// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true.
type EnqueueRequestForOwner struct {
// OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared.
OwnerType runtime.Object
// IsController if set will only look at the first OwnerReference with Controller: true.
IsController bool
// groupKind is the cached Group and Kind from OwnerType
groupKind schema.GroupKind
// mapper maps GroupVersionKinds to Resources
mapper meta.RESTMapper
}
// Create implements EventHandler.
func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.getOwnerReconcileRequest(evt.Object, reqs)
for req := range reqs {
q.Add(req)
}
}
// Update implements EventHandler.
func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.getOwnerReconcileRequest(evt.ObjectOld, reqs)
e.getOwnerReconcileRequest(evt.ObjectNew, reqs)
for req := range reqs {
q.Add(req)
}
}
// Delete implements EventHandler.
func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.getOwnerReconcileRequest(evt.Object, reqs)
for req := range reqs {
q.Add(req)
}
}
// Generic implements EventHandler.
func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
reqs := map[reconcile.Request]empty{}
e.getOwnerReconcileRequest(evt.Object, reqs)
for req := range reqs {
q.Add(req)
}
}
// parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. It returns an error
// if the OwnerType could not be parsed using the scheme.
func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error {
// Get the kinds of the type
kinds, _, err := scheme.ObjectKinds(e.OwnerType)
if err != nil {
log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType))
return err
}
// Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions.
if len(kinds) != 1 {
err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds)
log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds)
return err
}
// Cache the Group and Kind for the OwnerType
e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind}
return nil
}
// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile
// owners of object that match e.OwnerType.
func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) {
// Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested
// by the user
for _, ref := range e.getOwnersReferences(object) {
// Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType
refGV, err := schema.ParseGroupVersion(ref.APIVersion)
if err != nil {
log.Error(err, "Could not parse OwnerReference APIVersion",
"api version", ref.APIVersion)
return
}
// Compare the OwnerReference Group and Kind against the OwnerType Group and Kind specified by the user.
// If the two match, create a Request for the object referred to by
// the OwnerReference. Use the Name from the OwnerReference and the Namespace from the
// object in the event.
if ref.Kind == e.groupKind.Kind && refGV.Group == e.groupKind.Group {
// Match found - add a Request for the object referred to in the OwnerReference
request := reconcile.Request{NamespacedName: types.NamespacedName{
Name: ref.Name,
}}
// if owner is not namespaced then we should set the namespace to the empty value
mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version)
if err != nil {
log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind)
return
}
if mapping.Scope.Name() != meta.RESTScopeNameRoot {
request.Namespace = object.GetNamespace()
}
result[request] = empty{}
}
}
}
// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner
// - if IsController is true: only take the Controller OwnerReference (if found)
// - if IsController is false: take all OwnerReferences.
func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference {
if object == nil {
return nil
}
// If not filtered as Controller only, then use all the OwnerReferences
if !e.IsController {
return object.GetOwnerReferences()
}
// If filtered to a Controller, only take the Controller OwnerReference
if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
return []metav1.OwnerReference{*ownerRef}
}
// No Controller OwnerReference found
return nil
}
var _ inject.Scheme = &EnqueueRequestForOwner{}
// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner.
func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error {
return e.parseOwnerTypeGroupKind(s)
}
var _ inject.Mapper = &EnqueueRequestForOwner{}
// InjectMapper is called by the Controller to provide the rest mapper used by the manager.
func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error {
e.mapper = m
return nil
}
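// Illustrative sketch (not part of the vendored source): how EnqueueRequestForOwner
// is typically wired into a controller's Watch so that events for owned ReplicaSets
// enqueue a reconcile for their owning Deployment. The controller value `c` is assumed
// to have been created via a manager (which performs the scheme/mapper injection above);
// all names here are for illustration only.
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchOwnedReplicaSets(c controller.Controller) error {
	// Enqueue a Request for the owning Deployment whenever an owned ReplicaSet
	// changes; IsController restricts matching to the controller owner reference.
	return c.Watch(
		&source.Kind{Type: &appsv1.ReplicaSet{}},
		&handler.EnqueueRequestForOwner{
			OwnerType:    &appsv1.Deployment{},
			IsController: true,
		},
	)
}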

View File

@@ -0,0 +1,104 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/event"
)
// EventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). EventHandlers map an Event
// for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an
// Event for object with type Foo (using source.KindSource) then reconcile one or more object(s) with type Bar.
//
// Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called.
//
// * Use EnqueueRequestForObject to reconcile the object the event is for
// - do this for events for the type the Controller Reconciles. (e.g. Deployment for a Deployment Controller)
//
// * Use EnqueueRequestForOwner to reconcile the owner of the object the event is for
// - do this for events for the types the Controller creates. (e.g. ReplicaSets created by a Deployment Controller)
//
// * Use EnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object
// of a different type - do this for events for types the Controller may be interested in, but doesn't create.
// (e.g. If Foo responds to cluster size events, map Node events to Foo objects.)
//
// Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface.
// Most users shouldn't need to implement their own EventHandler.
type EventHandler interface {
// Create is called in response to a create event - e.g. Pod Creation.
Create(event.CreateEvent, workqueue.RateLimitingInterface)
// Update is called in response to an update event - e.g. Pod Updated.
Update(event.UpdateEvent, workqueue.RateLimitingInterface)
// Delete is called in response to a delete event - e.g. Pod Deleted.
Delete(event.DeleteEvent, workqueue.RateLimitingInterface)
// Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or
// external trigger request - e.g. reconcile Autoscaling, or a Webhook.
Generic(event.GenericEvent, workqueue.RateLimitingInterface)
}
var _ EventHandler = Funcs{}
// Funcs implements EventHandler.
type Funcs struct {
// CreateFunc is called in response to a create event. Defaults to no-op.
// RateLimitingInterface is used to enqueue reconcile.Requests.
CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface)
// UpdateFunc is called in response to an update event. Defaults to no-op.
// RateLimitingInterface is used to enqueue reconcile.Requests.
UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface)
// DeleteFunc is called in response to a delete event. Defaults to no-op.
// RateLimitingInterface is used to enqueue reconcile.Requests.
DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface)
// GenericFunc is called in response to a generic event. Defaults to no-op.
// RateLimitingInterface is used to enqueue reconcile.Requests.
GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface)
}
// Create implements EventHandler.
func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {
if h.CreateFunc != nil {
h.CreateFunc(e, q)
}
}
// Delete implements EventHandler.
func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
if h.DeleteFunc != nil {
h.DeleteFunc(e, q)
}
}
// Update implements EventHandler.
func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
if h.UpdateFunc != nil {
h.UpdateFunc(e, q)
}
}
// Generic implements EventHandler.
func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {
if h.GenericFunc != nil {
h.GenericFunc(e, q)
}
}
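// Illustrative sketch (not part of the vendored source): a minimal custom EventHandler
// built from handler.Funcs that enqueues the changed object itself on create and update
// and ignores delete and generic events. The helper name enqueueObject is hypothetical.
package example

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// enqueueObject adds a reconcile.Request for the given object key to the workqueue.
func enqueueObject(name, namespace string, q workqueue.RateLimitingInterface) {
	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}})
}

var exampleHandler handler.EventHandler = handler.Funcs{
	CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) {
		enqueueObject(e.Object.GetName(), e.Object.GetNamespace(), q)
	},
	UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
		enqueueObject(e.ObjectNew.GetName(), e.ObjectNew.GetNamespace(), q)
	},
	// DeleteFunc and GenericFunc are left nil and therefore default to no-ops.
}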

View File

@@ -0,0 +1,32 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package healthz contains helpers for supporting liveness and readiness endpoints
// (often referred to as healthz and readyz, respectively).
//
// This package draws heavily from the apiserver's healthz package
// ( https://github.com/kubernetes/apiserver/tree/master/pkg/server/healthz )
// but has some changes to bring it in line with controller-runtime's style.
//
// The main entrypoint is the Handler -- this serves both aggregated health status
// and individual health check endpoints.
package healthz
import (
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
var log = logf.RuntimeLog.WithName("healthz")

View File

@@ -0,0 +1,206 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package healthz
import (
"fmt"
"net/http"
"path"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
)
// Handler is an http.Handler that aggregates the results of the given
// checkers to the root path, and supports calling individual checkers on
// subpaths of the name of the checker.
//
// Adding checks on the fly is *not* threadsafe -- use a wrapper.
type Handler struct {
Checks map[string]Checker
}
// checkStatus holds the output of a particular check.
type checkStatus struct {
name string
healthy bool
excluded bool
}
func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) {
failed := false
excluded := getExcludedChecks(req)
parts := make([]checkStatus, 0, len(h.Checks))
// calculate the results...
for checkName, check := range h.Checks {
// no-op the check if we've specified we want to exclude the check
if excluded.Has(checkName) {
excluded.Delete(checkName)
parts = append(parts, checkStatus{name: checkName, healthy: true, excluded: true})
continue
}
if err := check(req); err != nil {
log.V(1).Info("healthz check failed", "checker", checkName, "error", err)
parts = append(parts, checkStatus{name: checkName, healthy: false})
failed = true
} else {
parts = append(parts, checkStatus{name: checkName, healthy: true})
}
}
// ...default a check if none is present...
if len(h.Checks) == 0 {
parts = append(parts, checkStatus{name: "ping", healthy: true})
}
for _, c := range excluded.List() {
log.V(1).Info("cannot exclude health check, no matches for it", "checker", c)
}
// ...sort to be consistent...
sort.Slice(parts, func(i, j int) bool { return parts[i].name < parts[j].name })
// ...and write out the result
// TODO(directxman12): this should also accept a request for JSON content (via an Accept header)
_, forceVerbose := req.URL.Query()["verbose"]
writeStatusesAsText(resp, parts, excluded, failed, forceVerbose)
}
// writeStatusesAsText writes out the given check statuses in some semi-arbitrary
// bespoke text format that we copied from Kubernetes. unknownExcludes lists
// any checks that the user requested to have excluded, but weren't actually
// known checks. writeStatusesAsText is always verbose on failure, and can be
// forced to be verbose on success using the given argument.
func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.String, failed, forceVerbose bool) {
resp.Header().Set("Content-Type", "text/plain; charset=utf-8")
resp.Header().Set("X-Content-Type-Options", "nosniff")
// always write status code first
if failed {
resp.WriteHeader(http.StatusInternalServerError)
} else {
resp.WriteHeader(http.StatusOK)
}
// shortcut for easy non-verbose success
if !failed && !forceVerbose {
fmt.Fprint(resp, "ok")
return
}
// we're always verbose on failure, so from this point on we're guaranteed to be verbose
for _, checkOut := range parts {
switch {
case checkOut.excluded:
fmt.Fprintf(resp, "[+]%s excluded: ok\n", checkOut.name)
case checkOut.healthy:
fmt.Fprintf(resp, "[+]%s ok\n", checkOut.name)
default:
// don't include the error since this endpoint is public. If someone wants more detail
// they should have explicit permission to the detailed checks.
fmt.Fprintf(resp, "[-]%s failed: reason withheld\n", checkOut.name)
}
}
if unknownExcludes.Len() > 0 {
fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.List()...))
}
if failed {
log.Info("healthz check failed", "statuses", parts)
fmt.Fprintf(resp, "healthz check failed\n")
} else {
fmt.Fprint(resp, "healthz check passed\n")
}
}
func (h *Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
// clean up the request (duplicating the internal logic of http.ServeMux a bit)
// clean up the path a bit
reqPath := req.URL.Path
if reqPath == "" || reqPath[0] != '/' {
reqPath = "/" + reqPath
}
// path.Clean removes the trailing slash except for root for us
// (which is fine, since we're only serving one layer of sub-paths)
reqPath = path.Clean(reqPath)
// either serve the root endpoint...
if reqPath == "/" {
h.serveAggregated(resp, req)
return
}
// ...the default check (if nothing else is present)...
if len(h.Checks) == 0 && reqPath[1:] == "ping" {
CheckHandler{Checker: Ping}.ServeHTTP(resp, req)
return
}
// ...or an individual checker
checkName := reqPath[1:] // ignore the leading slash
checker, known := h.Checks[checkName]
if !known {
http.NotFoundHandler().ServeHTTP(resp, req)
return
}
CheckHandler{Checker: checker}.ServeHTTP(resp, req)
}
// CheckHandler is an http.Handler that serves a health check endpoint at the root path,
// based on its checker.
type CheckHandler struct {
Checker
}
func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
if err := h.Checker(req); err != nil {
http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError)
} else {
fmt.Fprint(resp, "ok")
}
}
// Checker knows how to perform a health check.
type Checker func(req *http.Request) error
// Ping is a Checker that always succeeds (returns nil) when checked.
var Ping Checker = func(_ *http.Request) error { return nil }
// getExcludedChecks extracts the health check names to be excluded from the query param.
func getExcludedChecks(r *http.Request) sets.String {
checks, found := r.URL.Query()["exclude"]
if found {
return sets.NewString(checks...)
}
return sets.NewString()
}
// formatQuoted returns a formatted string of the health check names,
// preserving the order passed in.
func formatQuoted(names ...string) string {
quoted := make([]string, 0, len(names))
for _, name := range names {
quoted = append(quoted, fmt.Sprintf("%q", name))
}
return strings.Join(quoted, ",")
}
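// Illustrative sketch (not part of the vendored source): serving an aggregated health
// endpoint plus per-check subpaths with this Handler. The "etcd" check, its error text,
// and the listen address are placeholders.
package main

import (
	"errors"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func main() {
	h := &healthz.Handler{Checks: map[string]healthz.Checker{
		"ping": healthz.Ping,
		"etcd": func(_ *http.Request) error {
			// Replace with a real dependency check; a non-nil error marks the
			// aggregated endpoint (and the /etcd subpath) as failed.
			return errors.New("etcd not reachable")
		},
	}}
	// GET /      -> aggregated status of all checks
	// GET /ping  -> just the ping check
	_ = http.ListenAndServe(":8081", h)
}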

View File

@@ -0,0 +1,360 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/go-logr/logr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/handler"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var _ inject.Injector = &Controller{}
// Controller implements controller.Controller.
type Controller struct {
// Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required.
Name string
// MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1.
MaxConcurrentReconciles int
// Reconciler is a function that can be called at any time with the Name / Namespace of an object and
// ensures that the state of the system matches the state specified in the object.
// Defaults to the DefaultReconcileFunc.
Do reconcile.Reconciler
// MakeQueue constructs the queue for this controller once the controller is ready to start.
// This exists because the standard Kubernetes workqueues start themselves immediately, which
// leads to goroutine leaks if something calls controller.New repeatedly.
MakeQueue func() workqueue.RateLimitingInterface
// Queue is a listeningQueue that listens for events from Informers and adds object keys to
// the Queue for processing
Queue workqueue.RateLimitingInterface
// SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates
// Deprecated: the caller should handle injected fields itself.
SetFields func(i interface{}) error
// mu is used to synchronize Controller setup
mu sync.Mutex
// Started is true if the Controller has been Started
Started bool
// ctx is the context that was passed to Start() and used when starting watches.
//
// According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context.
// While we usually strive to follow best practices, we consider this a legacy case that should
// undergo a major refactoring and redesign to allow the context not to be stored in a struct.
ctx context.Context
// CacheSyncTimeout refers to the time limit set on waiting for cache to sync
// Defaults to 2 minutes if not set.
CacheSyncTimeout time.Duration
// startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
startWatches []watchDescription
// LogConstructor is used to construct a logger that is then used to log messages during reconciliation,
// or, for example, when a watch is started.
// Note: LogConstructor has to be able to handle nil requests as we are also using it
// outside the context of a reconciliation.
LogConstructor func(request *reconcile.Request) logr.Logger
// RecoverPanic indicates whether the panic caused by reconcile should be recovered.
RecoverPanic bool
}
// watchDescription contains all the information necessary to start a watch.
type watchDescription struct {
src source.Source
handler handler.EventHandler
predicates []predicate.Predicate
}
// Reconcile implements reconcile.Reconciler.
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
defer func() {
if r := recover(); r != nil {
if c.RecoverPanic {
for _, fn := range utilruntime.PanicHandlers {
fn(r)
}
err = fmt.Errorf("panic: %v [recovered]", r)
return
}
log := logf.FromContext(ctx)
log.Info(fmt.Sprintf("Observed a panic in reconciler: %v", r))
panic(r)
}
}()
return c.Do.Reconcile(ctx, req)
}
// Watch implements controller.Controller.
func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error {
c.mu.Lock()
defer c.mu.Unlock()
// Inject Cache into arguments
if err := c.SetFields(src); err != nil {
return err
}
if err := c.SetFields(evthdler); err != nil {
return err
}
for _, pr := range prct {
if err := c.SetFields(pr); err != nil {
return err
}
}
// Controller hasn't started yet, store the watches locally and return.
//
// These watches are going to be held on the controller struct until the manager or user calls Start(...).
if !c.Started {
c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct})
return nil
}
c.LogConstructor(nil).Info("Starting EventSource", "source", src)
return src.Start(c.ctx, evthdler, c.Queue, prct...)
}
// Start implements controller.Controller.
func (c *Controller) Start(ctx context.Context) error {
// use an IIFE to get proper lock handling
// but lock outside to get proper handling of the queue shutdown
c.mu.Lock()
if c.Started {
return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times")
}
c.initMetrics()
// Set the internal context.
c.ctx = ctx
c.Queue = c.MakeQueue()
go func() {
<-ctx.Done()
c.Queue.ShutDown()
}()
wg := &sync.WaitGroup{}
err := func() error {
defer c.mu.Unlock()
// TODO(pwittrock): Reconsider HandleCrash
defer utilruntime.HandleCrash()
// NB(directxman12): launch the sources *before* trying to wait for the
// caches to sync so that they have a chance to register their intended
// caches.
for _, watch := range c.startWatches {
c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src))
if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil {
return err
}
}
// Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches
c.LogConstructor(nil).Info("Starting Controller")
for _, watch := range c.startWatches {
syncingSource, ok := watch.src.(source.SyncingSource)
if !ok {
continue
}
if err := func() error {
// use a context with timeout for launching sources and syncing caches.
sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout)
defer cancel()
// WaitForSync waits for a definitive timeout, and returns if there
// is an error or a timeout
if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err)
c.LogConstructor(nil).Error(err, "Could not wait for Cache to sync")
return err
}
return nil
}(); err != nil {
return err
}
}
// All the watches have been started, we can reset the local slice.
//
// We should never hold watches more than necessary, each watch source can hold a backing cache,
// which won't be garbage collected if we hold a reference to it.
c.startWatches = nil
// Launch workers to process resources
c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
wg.Add(c.MaxConcurrentReconciles)
for i := 0; i < c.MaxConcurrentReconciles; i++ {
go func() {
defer wg.Done()
// Run a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the reconcileHandler is never invoked concurrently with the same object.
for c.processNextWorkItem(ctx) {
}
}()
}
c.Started = true
return nil
}()
if err != nil {
return err
}
<-ctx.Done()
c.LogConstructor(nil).Info("Shutdown signal received, waiting for all workers to finish")
wg.Wait()
c.LogConstructor(nil).Info("All workers finished")
return nil
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the reconcileHandler.
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
obj, shutdown := c.Queue.Get()
if shutdown {
// Stop working
return false
}
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.Queue.Done(obj)
ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1)
defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1)
c.reconcileHandler(ctx, obj)
return true
}
const (
labelError = "error"
labelRequeueAfter = "requeue_after"
labelRequeue = "requeue"
labelSuccess = "success"
)
func (c *Controller) initMetrics() {
ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0)
ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0)
ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles))
}
func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
// Update metrics after processing each item
reconcileStartTS := time.Now()
defer func() {
c.updateMetrics(time.Since(reconcileStartTS))
}()
// Make sure that the object is a valid request.
req, ok := obj.(reconcile.Request)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.Queue.Forget(obj)
c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
// Return early; an invalid item cannot be reconciled.
return
}
log := c.LogConstructor(&req)
log = log.WithValues("reconcileID", uuid.NewUUID())
ctx = logf.IntoContext(ctx, log)
// RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the
// resource to be synced.
result, err := c.Reconcile(ctx, req)
switch {
case err != nil:
c.Queue.AddRateLimited(req)
ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc()
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc()
log.Error(err, "Reconciler error")
case result.RequeueAfter > 0:
// The result.RequeueAfter setting is lost if it is returned
// along with a non-nil error. This is intentional: we need to drive
// reconcile loops to a stable state before requeuing based on
// result.RequeueAfter.
c.Queue.Forget(obj)
c.Queue.AddAfter(req, result.RequeueAfter)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc()
case result.Requeue:
c.Queue.AddRateLimited(req)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc()
default:
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.Queue.Forget(obj)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc()
}
}
// GetLogger returns this controller's logger.
func (c *Controller) GetLogger() logr.Logger {
return c.LogConstructor(nil)
}
// InjectFunc implement SetFields.Injector.
func (c *Controller) InjectFunc(f inject.Func) error {
c.SetFields = f
return nil
}
// updateMetrics updates prometheus metrics within the controller.
func (c *Controller) updateMetrics(reconcileTime time.Duration) {
ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds())
}
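// Illustrative sketch (not part of the vendored source): a reconcile.Func whose return
// values exercise the requeue paths handled in reconcileHandler above - a non-nil error
// triggers a rate-limited requeue, RequeueAfter schedules a delayed requeue, and an empty
// Result marks success. The notReadyYet condition and the 30s delay are placeholders.
package example

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

var exampleReconciler reconcile.Reconciler = reconcile.Func(
	func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
		// ... fetch the object named by req.NamespacedName and converge state ...
		notReadyYet := true
		if notReadyYet {
			// Re-enqueued via Queue.AddAfter in reconcileHandler.
			return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
		}
		// Success: the request is forgotten and not requeued.
		return reconcile.Result{}, nil
	})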

View File

@@ -0,0 +1,78 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
var (
// ReconcileTotal is a prometheus counter metric which holds the total
// number of reconciliations per controller. It has two labels: the controller label
// refers to the controller name and the result label refers to the reconcile result,
// i.e. success, error, requeue, requeue_after.
ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_total",
Help: "Total number of reconciliations per controller",
}, []string{"controller", "result"})
// ReconcileErrors is a prometheus counter metric which holds the total
// number of errors from the Reconciler.
ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_errors_total",
Help: "Total number of reconciliation errors per controller",
}, []string{"controller"})
// ReconcileTime is a prometheus metric which keeps track of the duration
// of reconciliations.
ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "controller_runtime_reconcile_time_seconds",
Help: "Length of time per reconciliation per controller",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60},
}, []string{"controller"})
// WorkerCount is a prometheus metric which holds the number of
// concurrent reconciles per controller.
WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_max_concurrent_reconciles",
Help: "Maximum number of concurrent reconciles per controller",
}, []string{"controller"})
// ActiveWorkers is a prometheus metric which holds the number
// of active workers per controller.
ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_active_workers",
Help: "Number of currently used workers per controller",
}, []string{"controller"})
)
func init() {
metrics.Registry.MustRegister(
ReconcileTotal,
ReconcileErrors,
ReconcileTime,
WorkerCount,
ActiveWorkers,
// expose process metrics like CPU, Memory, file descriptor usage etc.
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
// expose Go runtime metrics like GC stats, memory stats etc.
collectors.NewGoCollector(),
)
}
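// Illustrative sketch (not part of the vendored source): registering a custom collector
// on the same global Registry these controller metrics use, so it is exposed by the
// manager's metrics endpoint. The metric name and label are hypothetical.
package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

var widgetSyncs = prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "widget_syncs_total",
	Help: "Total number of widget syncs, partitioned by outcome",
}, []string{"outcome"})

func init() {
	// metrics.Registry is the registry served by the manager's /metrics handler.
	metrics.Registry.MustRegister(widgetSyncs)
}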

View File

@@ -0,0 +1,21 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package flock is copied from k8s.io/kubernetes/pkg/util/flock to avoid
// importing k8s.io/kubernetes as a dependency.
//
// It provides file locking functionality on Unix systems.
package flock

View File

@@ -0,0 +1,24 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flock
import "errors"
var (
// ErrAlreadyLocked is returned when the file is already locked.
ErrAlreadyLocked = errors.New("the file is already locked")
)

View File

@@ -0,0 +1,24 @@
// +build !linux,!darwin,!freebsd,!openbsd,!netbsd,!dragonfly
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flock
// Acquire is not implemented on non-unix systems.
func Acquire(path string) error {
return nil
}

View File

@@ -0,0 +1,48 @@
//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly
// +build linux darwin freebsd openbsd netbsd dragonfly
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flock
import (
"errors"
"fmt"
"os"
"golang.org/x/sys/unix"
)
// Acquire acquires a lock on a file for the duration of the process. This method
// is reentrant.
func Acquire(path string) error {
fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600)
if err != nil {
if errors.Is(err, os.ErrExist) {
return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked)
}
return err
}
// We don't need to close the fd since we should hold
// it until the process exits.
err = unix.Flock(fd, unix.LOCK_NB|unix.LOCK_EX)
if errors.Is(err, unix.EWOULDBLOCK) { // This condition requires LOCK_NB.
return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked)
}
return err
}
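// Illustrative sketch (not part of the vendored source): guarding a scratch directory with
// this Acquire helper. The lock path is a placeholder, and the package is internal to
// controller-runtime, so code outside this module would need its own copy of the helper.
package example

import (
	"errors"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/internal/flock"
)

func lockScratchDir() error {
	err := flock.Acquire("/tmp/envtest-scratch.lock")
	if errors.Is(err, flock.ErrAlreadyLocked) {
		return fmt.Errorf("another process owns the scratch dir: %w", err)
	}
	// nil on success; the lock is held until the process exits.
	return err
}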

View File

@@ -0,0 +1,16 @@
package httpserver
import (
"net/http"
"time"
)
// New returns a new server with sane defaults.
func New(handler http.Handler) *http.Server {
return &http.Server{
Handler: handler,
MaxHeaderBytes: 1 << 20,
IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout
ReadHeaderTimeout: 32 * time.Second,
}
}
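// Illustrative sketch (not part of the vendored source): pairing the hardened defaults from
// New with a mux and an explicit listen address. The address and handler are placeholders,
// and this package is internal to controller-runtime.
package main

import (
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/internal/httpserver"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/readyz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	srv := httpserver.New(mux) // timeouts and header limits come from New
	srv.Addr = ":8080"
	_ = srv.ListenAndServe()
}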

View File

@@ -0,0 +1,32 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"github.com/go-logr/logr"
"sigs.k8s.io/controller-runtime/pkg/log"
)
var (
// RuntimeLog is a base parent logger for use inside controller-runtime.
RuntimeLog logr.Logger
)
func init() {
RuntimeLog = log.Log.WithName("controller-runtime")
}

View File

@@ -0,0 +1,78 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package objectutil
import (
"errors"
"fmt"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// FilterWithLabels returns a copy of the items in objs matching labelSel.
func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) {
outItems := make([]runtime.Object, 0, len(objs))
for _, obj := range objs {
meta, err := apimeta.Accessor(obj)
if err != nil {
return nil, err
}
if labelSel != nil {
lbls := labels.Set(meta.GetLabels())
if !labelSel.Matches(lbls) {
continue
}
}
outItems = append(outItems, obj.DeepCopyObject())
}
return outItems, nil
}
// IsAPINamespaced returns true if the object is namespace scoped.
// For unstructured objects the gvk is found from the object itself.
func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return false, err
}
return IsAPINamespacedWithGVK(gvk, scheme, restmapper)
}
// IsAPINamespacedWithGVK returns true if the object having the provided
// GVK is namespace scoped.
func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind})
if err != nil {
return false, fmt.Errorf("failed to get restmapping: %w", err)
}
scope := restmapping.Scope.Name()
if scope == "" {
return false, errors.New("scope cannot be identified, empty scope returned")
}
if scope != apimeta.RESTScopeNameRoot {
return true, nil
}
return false, nil
}
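// Illustrative sketch (not part of the vendored source): checking whether a ConfigMap is
// namespace-scoped using the scheme and RESTMapper from a manager. The manager value is
// assumed to exist, and this package is internal to controller-runtime.
package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func printScope(mgr manager.Manager) error {
	namespaced, err := objectutil.IsAPINamespaced(&corev1.ConfigMap{}, mgr.GetScheme(), mgr.GetRESTMapper())
	if err != nil {
		return err
	}
	fmt.Println("ConfigMap namespaced:", namespaced) // true for ConfigMaps
	return nil
}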

View File

@@ -0,0 +1,176 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package recorder
import (
"context"
"fmt"
"sync"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
)
// EventBroadcasterProducer makes an event broadcaster, returning
// whether or not the broadcaster should be stopped with the Provider
// (e.g. if it's shared, it shouldn't be stopped with the Provider).
type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool)
// Provider is a recorder.Provider that records events to the k8s API server
// and to a logr Logger.
type Provider struct {
lock sync.RWMutex
stopped bool
// scheme to specify when creating a recorder
scheme *runtime.Scheme
// logger is the logger to use when logging diagnostic event info
logger logr.Logger
evtClient corev1client.EventInterface
makeBroadcaster EventBroadcasterProducer
broadcasterOnce sync.Once
broadcaster record.EventBroadcaster
stopBroadcaster bool
}
// NB(directxman12): this manually implements Stop instead of being a runnable because we need to
// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election
// code finishes up and tries to continue emitting events.
// Stop attempts to stop this provider, stopping the underlying broadcaster
// if the broadcaster asked to be stopped. It kinda tries to honor the given
// context, but the underlying broadcaster has an indefinite wait that doesn't
// return until all queued events are flushed, so this may end up just returning
// before the underlying wait has finished instead of cancelling the wait.
// This is Very Frustrating™.
func (p *Provider) Stop(shutdownCtx context.Context) {
doneCh := make(chan struct{})
go func() {
// technically, this could start the broadcaster, but practically, it's
// almost certainly already been started (e.g. by leader election). We
// need to invoke this to ensure that we don't inadvertently race with
// an invocation of getBroadcaster.
broadcaster := p.getBroadcaster()
if p.stopBroadcaster {
p.lock.Lock()
broadcaster.Shutdown()
p.stopped = true
p.lock.Unlock()
}
close(doneCh)
}()
select {
case <-shutdownCtx.Done():
case <-doneCh:
}
}
// getBroadcaster ensures that a broadcaster is started for this
// provider, and returns it. It's threadsafe.
func (p *Provider) getBroadcaster() record.EventBroadcaster {
// NB(directxman12): this can technically still leak if something calls
// "getBroadcaster" (i.e. Emits an Event) but never calls Start, but if we
// create the broadcaster in start, we could race with other things that
// are started at the same time & want to emit events. The alternative is
// silently swallowing events and more locking, but that seems suboptimal.
p.broadcasterOnce.Do(func() {
broadcaster, stop := p.makeBroadcaster()
broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: p.evtClient})
broadcaster.StartEventWatcher(
func(e *corev1.Event) {
p.logger.V(1).Info(e.Message, "type", e.Type, "object", e.InvolvedObject, "reason", e.Reason)
})
p.broadcaster = broadcaster
p.stopBroadcaster = stop
})
return p.broadcaster
}
// NewProvider creates a new Provider instance.
func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) {
corev1Client, err := corev1client.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to init client: %w", err)
}
p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: corev1Client.Events("")}
return p, nil
}
// GetEventRecorderFor returns an event recorder that broadcasts to this provider's
// broadcaster. All events will be associated with a component of the given name.
func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder {
return &lazyRecorder{
prov: p,
name: name,
}
}
// lazyRecorder is a recorder that doesn't actually instantiate any underlying
// recorder until the first event is emitted.
type lazyRecorder struct {
prov *Provider
name string
recOnce sync.Once
rec record.EventRecorder
}
// ensureRecording ensures that a concrete recorder is populated for this recorder.
func (l *lazyRecorder) ensureRecording() {
l.recOnce.Do(func() {
broadcaster := l.prov.getBroadcaster()
l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name})
})
}
func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) {
l.ensureRecording()
l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.Event(object, eventtype, reason, message)
}
l.prov.lock.RUnlock()
}
func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
l.ensureRecording()
l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.Eventf(object, eventtype, reason, messageFmt, args...)
}
l.prov.lock.RUnlock()
}
func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
l.ensureRecording()
l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...)
}
l.prov.lock.RUnlock()
}
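// Illustrative sketch (not part of the vendored source): emitting an event through a recorder
// handed out by this provider (normally obtained via the manager's GetEventRecorderFor). The
// component name, reason, and message are placeholders.
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func markSynced(recorder record.EventRecorder, pod *corev1.Pod) {
	// Events are dropped silently if the provider has already been stopped,
	// as guarded by the lazyRecorder methods above.
	recorder.Event(pod, corev1.EventTypeNormal, "Synced", "Pod successfully reconciled")
}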

View File

@@ -0,0 +1,142 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addr
import (
"errors"
"fmt"
"io/fs"
"net"
"os"
"path/filepath"
"strings"
"time"
"sigs.k8s.io/controller-runtime/pkg/internal/flock"
)
// TODO(directxman12): interface / release functionality for external port managers
const (
portReserveTime = 2 * time.Minute
portConflictRetry = 100
portFilePrefix = "port-"
)
var (
cacheDir string
)
func init() {
baseDir, err := os.UserCacheDir()
if err == nil {
cacheDir = filepath.Join(baseDir, "kubebuilder-envtest")
err = os.MkdirAll(cacheDir, 0o750)
}
if err != nil {
// Either we didn't get a cache directory, or we can't use it
baseDir = os.TempDir()
cacheDir = filepath.Join(baseDir, "kubebuilder-envtest")
err = os.MkdirAll(cacheDir, 0o750)
}
if err != nil {
panic(err)
}
}
type portCache struct{}
func (c *portCache) add(port int) (bool, error) {
// Remove outdated ports.
if err := fs.WalkDir(os.DirFS(cacheDir), ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() || !d.Type().IsRegular() || !strings.HasPrefix(path, portFilePrefix) {
return nil
}
info, err := d.Info()
if err != nil {
// No-op if file no longer exists; may have been deleted by another
// process/thread trying to allocate ports.
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
if time.Since(info.ModTime()) > portReserveTime {
if err := os.Remove(filepath.Join(cacheDir, path)); err != nil {
// No-op if file no longer exists; may have been deleted by another
// process/thread trying to allocate ports.
if os.IsNotExist(err) {
return nil
}
return err
}
}
return nil
}); err != nil {
return false, err
}
// Try allocating new port, by acquiring a file.
path := fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port)
if err := flock.Acquire(path); errors.Is(err, flock.ErrAlreadyLocked) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
var cache = &portCache{}
func suggest(listenHost string) (*net.TCPListener, int, string, error) {
if listenHost == "" {
listenHost = "localhost"
}
addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(listenHost, "0"))
if err != nil {
return nil, -1, "", err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return nil, -1, "", err
}
return l, l.Addr().(*net.TCPAddr).Port,
addr.IP.String(),
nil
}
// Suggest suggests an address a process can listen on. It returns
// a tuple consisting of a free port and the hostname resolved to its IP.
// It makes sure that the newly allocated port does not conflict with ports
// allocated recently (within the reserve window, see portReserveTime).
func Suggest(listenHost string) (int, string, error) {
for i := 0; i < portConflictRetry; i++ {
listener, port, resolvedHost, err := suggest(listenHost)
if err != nil {
return -1, "", err
}
defer listener.Close()
if ok, err := cache.add(port); ok {
return port, resolvedHost, nil
} else if err != nil {
return -1, "", err
}
}
return -1, "", fmt.Errorf("no free ports found after %d retries", portConflictRetry)
}
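// Illustrative sketch (not part of the vendored source): reserving a free local port for a
// test API server. This package is internal to controller-runtime's envtest machinery, so
// this mirrors its in-tree usage rather than a public API.
package example

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
)

func reservePort() error {
	port, host, err := addr.Suggest("") // "" defaults to localhost
	if err != nil {
		return err
	}
	fmt.Printf("listen on %s:%d\n", host, port)
	return nil
}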

View File

@@ -0,0 +1,224 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certs
// NB(directxman12): nothing has verified that this has good settings. In fact,
// the settings generated here are probably terrible, but they're fine for integration
// tests. These ABSOLUTELY SHOULD NOT ever be exposed in the public API. They're
// ONLY for use with envtest's ability to configure webhook testing.
// If we didn't want to avoid adding a dependency on cfssl, we'd just use that.
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
crand "crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net"
"time"
certutil "k8s.io/client-go/util/cert"
)
var (
ellipticCurve = elliptic.P256()
bigOne = big.NewInt(1)
)
// CertPair is a private key and certificate for use for client auth, as a CA, or serving.
type CertPair struct {
Key crypto.Signer
Cert *x509.Certificate
}
// CertBytes returns the PEM-encoded version of the certificate for this pair.
func (k CertPair) CertBytes() []byte {
return pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: k.Cert.Raw,
})
}
// AsBytes encodes keypair in the appropriate formats for on-disk storage (PEM and
// PKCS8, respectively).
func (k CertPair) AsBytes() (cert []byte, key []byte, err error) {
cert = k.CertBytes()
rawKeyData, err := x509.MarshalPKCS8PrivateKey(k.Key)
if err != nil {
return nil, nil, fmt.Errorf("unable to encode private key: %w", err)
}
key = pem.EncodeToMemory(&pem.Block{
Type: "PRIVATE KEY",
Bytes: rawKeyData,
})
return cert, key, nil
}
// TinyCA supports signing serving certs and client-certs,
// and can be used as an auth mechanism with envtest.
type TinyCA struct {
CA CertPair
orgName string
nextSerial *big.Int
}
// newPrivateKey generates a new ECDSA private key on a reasonably strong curve (see
// ellipticCurve).
func newPrivateKey() (crypto.Signer, error) {
return ecdsa.GenerateKey(ellipticCurve, crand.Reader)
}
// NewTinyCA creates a new tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY.
// Don't use this for anything else!
func NewTinyCA() (*TinyCA, error) {
caPrivateKey, err := newPrivateKey()
if err != nil {
return nil, fmt.Errorf("unable to generate private key for CA: %w", err)
}
caCfg := certutil.Config{CommonName: "envtest-environment", Organization: []string{"envtest"}}
caCert, err := certutil.NewSelfSignedCACert(caCfg, caPrivateKey)
if err != nil {
return nil, fmt.Errorf("unable to generate certificate for CA: %w", err)
}
return &TinyCA{
CA: CertPair{Key: caPrivateKey, Cert: caCert},
orgName: "envtest",
nextSerial: big.NewInt(1),
}, nil
}
func (c *TinyCA) makeCert(cfg certutil.Config) (CertPair, error) {
now := time.Now()
key, err := newPrivateKey()
if err != nil {
return CertPair{}, fmt.Errorf("unable to create private key: %w", err)
}
serial := new(big.Int).Set(c.nextSerial)
c.nextSerial.Add(c.nextSerial, bigOne)
template := x509.Certificate{
Subject: pkix.Name{CommonName: cfg.CommonName, Organization: cfg.Organization},
DNSNames: cfg.AltNames.DNSNames,
IPAddresses: cfg.AltNames.IPs,
SerialNumber: serial,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: cfg.Usages,
// technically not necessary for testing, but let's set anyway just in case.
NotBefore: now.UTC(),
// 1 week -- the default for cfssl, and just long enough for a
// long-term test, but not too long that anyone would try to use this
// seriously.
NotAfter: now.Add(168 * time.Hour).UTC(),
}
certRaw, err := x509.CreateCertificate(crand.Reader, &template, c.CA.Cert, key.Public(), c.CA.Key)
if err != nil {
return CertPair{}, fmt.Errorf("unable to create certificate: %w", err)
}
cert, err := x509.ParseCertificate(certRaw)
if err != nil {
return CertPair{}, fmt.Errorf("generated invalid certificate, could not parse: %w", err)
}
return CertPair{
Key: key,
Cert: cert,
}, nil
}
// NewServingCert returns a new CertPair for serving HTTPS on localhost (or other specified names).
func (c *TinyCA) NewServingCert(names ...string) (CertPair, error) {
if len(names) == 0 {
names = []string{"localhost"}
}
dnsNames, ips, err := resolveNames(names)
if err != nil {
return CertPair{}, err
}
return c.makeCert(certutil.Config{
CommonName: "localhost",
Organization: []string{c.orgName},
AltNames: certutil.AltNames{
DNSNames: dnsNames,
IPs: ips,
},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
})
}
// ClientInfo describes some Kubernetes user for the purposes of creating
// client certificates.
type ClientInfo struct {
// Name is the user name (embedded as the cert's CommonName)
Name string
// Groups are the groups to which this user belongs (embedded as the cert's
// Organization)
Groups []string
}
// NewClientCert produces a new CertPair suitable for use with Kubernetes
// client cert auth with an API server validating based on this CA.
func (c *TinyCA) NewClientCert(user ClientInfo) (CertPair, error) {
return c.makeCert(certutil.Config{
CommonName: user.Name,
Organization: user.Groups,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
})
}
func resolveNames(names []string) ([]string, []net.IP, error) {
dnsNames := []string{}
ips := []net.IP{}
for _, name := range names {
if name == "" {
continue
}
ip := net.ParseIP(name)
if ip == nil {
dnsNames = append(dnsNames, name)
// Also resolve to IPs.
nameIPs, err := net.LookupHost(name)
if err != nil {
return nil, nil, err
}
for _, nameIP := range nameIPs {
ip = net.ParseIP(nameIP)
if ip != nil {
ips = append(ips, ip)
}
}
} else {
ips = append(ips, ip)
}
}
return dnsNames, ips, nil
}
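// Illustrative sketch (not part of the vendored source): provisioning a throwaway serving
// certificate for a local test webhook. As the comments above stress, this is only suitable
// for tests; the output file names are placeholders and the package is internal.
package example

import (
	"os"

	"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
)

func writeTestServingCert(dir string) error {
	ca, err := certs.NewTinyCA()
	if err != nil {
		return err
	}
	pair, err := ca.NewServingCert("localhost", "127.0.0.1")
	if err != nil {
		return err
	}
	certPEM, keyPEM, err := pair.AsBytes()
	if err != nil {
		return err
	}
	// Write the PEM-encoded cert and PKCS8 key where the test webhook server expects them.
	if err := os.WriteFile(dir+"/tls.crt", certPEM, 0o600); err != nil {
		return err
	}
	return os.WriteFile(dir+"/tls.key", keyPEM, 0o600)
}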

View File

@@ -0,0 +1,468 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"strconv"
"time"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
)
const (
// saKeyFile is the name of the service account signing private key file.
saKeyFile = "sa-signer.key"
// saCertFile is the name of the service account signing public key (cert) file.
saCertFile = "sa-signer.crt"
)
// SecureServing provides/configures how the API server serves on the secure port.
type SecureServing struct {
// ListenAddr contains the host & port to serve on.
//
// Configurable. If unset, it will be defaulted.
process.ListenAddr
// CA contains the CA that signed the API server's serving certificates.
//
// Read-only.
CA []byte
// Authn can be used to provision users, and override what type of
// authentication is used to provision users.
//
// Configurable. If unset, it will be defaulted.
Authn
}
// APIServer knows how to run a kubernetes apiserver.
type APIServer struct {
// URL is the address the ApiServer should listen on for client
// connections.
//
// If set, this will configure the *insecure* serving details.
// If unset, it will contain the insecure port if insecure serving is enabled,
// and otherwise will contain the secure port.
//
// If this is not specified, we default to a random free port on localhost.
//
// Deprecated: use InsecureServing (for the insecure URL) or SecureServing, ideally.
URL *url.URL
// SecurePort is the additional secure port that the APIServer should listen on.
//
// If set, this will override SecureServing.Port.
//
// Deprecated: use SecureServing.
SecurePort int
// SecureServing indicates how the API server will serve on the secure port.
//
// Some parts are configurable. Will be defaulted if unset.
SecureServing
// InsecureServing indicates how the API server will serve on the insecure port.
//
// If unset, the insecure port will be disabled. Set to an empty struct to get
// default values.
//
// Deprecated: does not work with Kubernetes versions 1.20 and above. Use secure
// serving instead.
InsecureServing *process.ListenAddr
// Path is the path to the apiserver binary.
//
// If this is left as the empty string, we will attempt to locate a binary,
// by checking for the TEST_ASSET_KUBE_APISERVER environment variable, and
// the default test assets directory. See the "Binaries" section above (in
// doc.go) for details.
Path string
// Args is a list of arguments which will be passed to the APIServer binary.
// Before they are passed on, they will be evaluated as go-template strings.
// This means you can use fields which are defined and exported on this
// APIServer struct (e.g. "--cert-dir={{ .Dir }}").
// Those templates will be evaluated after the defaulting of the APIServer's
// fields has already happened and just before the binary actually gets
// started. Thus you have access to calculated fields like `URL` and others.
//
// If not specified, the minimal set of arguments to run the APIServer will
// be used.
//
// They will be loaded into the same argument set as Configure. Each flag
// will be Append-ed to the configured arguments just before launch.
//
// Deprecated: use Configure instead.
Args []string
// CertDir is a path to a directory containing whatever certificates the
// APIServer will need.
//
// If left unspecified, then the Start() method will create a fresh temporary
// directory, and the Stop() method will clean it up.
CertDir string
// EtcdURL is the URL of the Etcd the APIServer should use.
//
// If this is not specified, the Start() method will return an error.
EtcdURL *url.URL
// StartTimeout, StopTimeout specify the time the APIServer is allowed to
// take when starting and stopping before an error is emitted.
//
// If not specified, these default to 20 seconds.
StartTimeout time.Duration
StopTimeout time.Duration
// Out, Err specify where APIServer should write its StdOut, StdErr to.
//
// If not specified, the output will be discarded.
Out io.Writer
Err io.Writer
processState *process.State
// args contains the structured arguments to use for running the API server
// Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs()
args *process.Arguments
}
// Configure returns Arguments that may be used to customize the
// flags used to launch the API server. A set of defaults will
// be applied underneath.
func (s *APIServer) Configure() *process.Arguments {
if s.args == nil {
s.args = process.EmptyArguments()
}
return s.args
}
// Start starts the apiserver, waits for it to come up, and returns an error
// if one occurred.
func (s *APIServer) Start() error {
if err := s.prepare(); err != nil {
return err
}
return s.processState.Start(s.Out, s.Err)
}
func (s *APIServer) prepare() error {
if err := s.setProcessState(); err != nil {
return err
}
return s.Authn.Start()
}
// configurePorts configures the serving ports for this API server.
//
// Most of this method currently deals with making the deprecated fields
// take precedence over the new fields.
func (s *APIServer) configurePorts() error {
// prefer the old fields to the new fields if a user set one,
// otherwise, default the new fields and populate the old ones.
// Insecure: URL, InsecureServing
if s.URL != nil {
s.InsecureServing = &process.ListenAddr{
Address: s.URL.Hostname(),
Port: s.URL.Port(),
}
} else if insec := s.InsecureServing; insec != nil {
if insec.Port == "" || insec.Address == "" {
port, host, err := addr.Suggest("")
if err != nil {
return fmt.Errorf("unable to provision unused insecure port: %w", err)
}
s.InsecureServing.Port = strconv.Itoa(port)
s.InsecureServing.Address = host
}
s.URL = s.InsecureServing.URL("http", "")
}
// Secure: SecurePort, SecureServing
if s.SecurePort != 0 {
s.SecureServing.Port = strconv.Itoa(s.SecurePort)
// if we don't have an address, try the insecure address, and otherwise
// default to loopback.
if s.SecureServing.Address == "" {
if s.InsecureServing != nil {
s.SecureServing.Address = s.InsecureServing.Address
} else {
s.SecureServing.Address = "127.0.0.1"
}
}
} else if s.SecureServing.Port == "" || s.SecureServing.Address == "" {
port, host, err := addr.Suggest("")
if err != nil {
return fmt.Errorf("unable to provision unused secure port: %w", err)
}
s.SecureServing.Port = strconv.Itoa(port)
s.SecureServing.Address = host
s.SecurePort = port
}
return nil
}
func (s *APIServer) setProcessState() error {
if s.EtcdURL == nil {
return fmt.Errorf("expected EtcdURL to be configured")
}
var err error
// unconditionally re-set this so we can successfully restart
// TODO(directxman12): we supported this in the past, but do we actually
// want to support re-using an API server object to restart? The loss
// of provisioned users is surprising to say the least.
s.processState = &process.State{
Dir: s.CertDir,
Path: s.Path,
StartTimeout: s.StartTimeout,
StopTimeout: s.StopTimeout,
}
if err := s.processState.Init("kube-apiserver"); err != nil {
return err
}
if err := s.configurePorts(); err != nil {
return err
}
// the secure port will always be on, so use that
s.processState.HealthCheck.URL = *s.SecureServing.URL("https", "/healthz")
s.CertDir = s.processState.Dir
s.Path = s.processState.Path
s.StartTimeout = s.processState.StartTimeout
s.StopTimeout = s.processState.StopTimeout
if err := s.populateAPIServerCerts(); err != nil {
return err
}
if s.SecureServing.Authn == nil {
authn, err := NewCertAuthn()
if err != nil {
return err
}
s.SecureServing.Authn = authn
}
if err := s.Authn.Configure(s.CertDir, s.Configure()); err != nil {
return err
}
// NB(directxman12): insecure port is a mess:
// - 1.19 and below have the `--insecure-port` flag, and require it to be set to zero to
// disable it, otherwise the default will be used and we'll conflict.
// - 1.20 requires the flag to be unset or set to zero, and yells at you if you configure it
// - 1.24 won't have the flag at all...
//
// In an effort to automatically do the right thing during this mess, we do feature discovery
// on the flags, and hope that we've "parsed" them properly.
//
// TODO(directxman12): once we support 1.20 as the min version (might be when 1.24 comes out,
// might be around 1.25 or 1.26), remove this logic and the corresponding line in API server's
// default args.
if err := s.discoverFlags(); err != nil {
return err
}
s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck
Data: s,
Defaults: s.defaultArgs(),
MinimalDefaults: map[string][]string{
// as per kubernetes-sigs/controller-runtime#641, we need this (we
// probably need other stuff too, but this is the only thing that was
// previously considered a "minimal default")
"service-cluster-ip-range": {"10.0.0.0/24"},
// we need *some* authorization mode for health checks on the secure port,
// so default to RBAC unless the user set something else (in which case
// this'll be ignored due to SliceToArguments using AppendNoDefaults).
"authorization-mode": {"RBAC"},
},
})
if err != nil {
return err
}
return nil
}
// discoverFlags checks for certain flags that *must* be set in certain
// versions, and *must not* be set in others.
func (s *APIServer) discoverFlags() error {
// Present: <1.24, Absent: >= 1.24
present, err := s.processState.CheckFlag("insecure-port")
if err != nil {
return err
}
if !present {
s.Configure().Disable("insecure-port")
}
return nil
}
func (s *APIServer) defaultArgs() map[string][]string {
args := map[string][]string{
"service-cluster-ip-range": {"10.0.0.0/24"},
"allow-privileged": {"true"},
// we're keeping this disabled because, if enabled, the default SA is
// missing, which would force all tests to create one. In normal
// apiserver operation this SA is created by a controller, but that is
// not run in the integration environment.
"disable-admission-plugins": {"ServiceAccount"},
"cert-dir": {s.CertDir},
"authorization-mode": {"RBAC"},
"secure-port": {s.SecureServing.Port},
// NB(directxman12): previously we didn't set the bind address for the secure
// port. It *shouldn't* make a difference unless people are doing something really
// funky, but if you start to get bug reports look here ;-)
"bind-address": {s.SecureServing.Address},
// required on 1.20+, fine to leave on for <1.20
"service-account-issuer": {s.SecureServing.URL("https", "/").String()},
"service-account-key-file": {filepath.Join(s.CertDir, saCertFile)},
"service-account-signing-key-file": {filepath.Join(s.CertDir, saKeyFile)},
}
if s.EtcdURL != nil {
args["etcd-servers"] = []string{s.EtcdURL.String()}
}
if s.URL != nil {
args["insecure-port"] = []string{s.URL.Port()}
args["insecure-bind-address"] = []string{s.URL.Hostname()}
} else {
// TODO(directxman12): remove this once 1.21 is the lowest version we support
// (this might be a while, but this line'll break as of 1.24, so see the comment
// in Start)
args["insecure-port"] = []string{"0"}
}
return args
}
func (s *APIServer) populateAPIServerCerts() error {
_, statErr := os.Stat(filepath.Join(s.CertDir, "apiserver.crt"))
if !os.IsNotExist(statErr) {
return statErr
}
ca, err := certs.NewTinyCA()
if err != nil {
return err
}
servingCerts, err := ca.NewServingCert()
if err != nil {
return err
}
certData, keyData, err := servingCerts.AsBytes()
if err != nil {
return err
}
if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0640); err != nil { //nolint:gosec
return err
}
if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0640); err != nil { //nolint:gosec
return err
}
s.SecureServing.CA = ca.CA.CertBytes()
// service account signing files too
saCA, err := certs.NewTinyCA()
if err != nil {
return err
}
saCert, saKey, err := saCA.CA.AsBytes()
if err != nil {
return err
}
if err := os.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0640); err != nil { //nolint:gosec
return err
}
return os.WriteFile(filepath.Join(s.CertDir, saKeyFile), saKey, 0640) //nolint:gosec
}
// Stop stops this process gracefully, waits for its termination, and cleans up
// the CertDir if necessary.
func (s *APIServer) Stop() error {
if s.processState != nil {
if s.processState.DirNeedsCleaning {
s.CertDir = "" // reset the directory if it was randomly allocated, so that we can safely restart
}
if err := s.processState.Stop(); err != nil {
return err
}
}
return s.Authn.Stop()
}
// APIServerDefaultArgs exposes the default args for the APIServer so that you
// can use those to append your own additional arguments.
//
// Note that these arguments don't handle newer API servers well due to the more
// complex feature detection needed. It's recommended that you switch to .Configure
// as you upgrade API server versions.
//
// Deprecated: use APIServer.Configure().
var APIServerDefaultArgs = []string{
"--advertise-address=127.0.0.1",
"--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}",
"--cert-dir={{ .CertDir }}",
"--insecure-port={{ if .URL }}{{ .URL.Port }}{{else}}0{{ end }}",
"{{ if .URL }}--insecure-bind-address={{ .URL.Hostname }}{{ end }}",
"--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}",
// we're keeping this disabled because, if enabled, the default SA is missing, which would force all tests to create one;
// in normal apiserver operation this SA is created by a controller, but that is not run in the integration environment
"--disable-admission-plugins=ServiceAccount",
"--service-cluster-ip-range=10.0.0.0/24",
"--allow-privileged=true",
// NB(directxman12): we also enable RBAC if nothing else was enabled
}
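// As a rough sketch, the structured replacement via Configure might look like
// the following (the flag values here are purely illustrative; pick flags that
// match your API server version):
//
//	server := &APIServer{}
//	server.Configure().
//		Append("feature-gates", "SomeFeature=true").
//		Set("audit-log-path", "-")
//	// EtcdURL must still be set (usually from a running Etcd) before Start().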
// PrepareAPIServer is an internal-only (NEVER SHOULD BE EXPOSED)
// function that sets up the API server just before starting it,
// without actually starting it. This saves time on tests.
//
// NB(directxman12): do not expose this outside of internal -- it's unsafe to
// use, because things like port allocation could race even more than they
// currently do if you later call start!
func PrepareAPIServer(s *APIServer) error {
return s.prepare()
}
// APIServerArguments is an internal-only (NEVER SHOULD BE EXPOSED)
// function that returns the arguments that the API server process will be
// launched with. It's public to make testing easier.
//
// NB(directxman12): do not expose this outside of internal.
func APIServerArguments(s *APIServer) []string {
return s.processState.Args
}

View File

@@ -0,0 +1,142 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
"os"
"path/filepath"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
)
// User represents a Kubernetes user.
type User struct {
// Name is the user's Name.
Name string
// Groups are the groups to which the user belongs.
Groups []string
}
// Authn knows how to configure an API server for a particular type of authentication,
// and provision users under that authentication scheme.
//
// The methods must be called in the following order (as presented below in the interface
// for a mnemonic):
//
// 1. Configure
// 2. Start
// 3. AddUsers (0+ calls)
// 4. Stop.
type Authn interface {
// Configure provides the working directory to this authenticator,
// and configures the given API server arguments to make use of this authenticator.
//
// Should be called first.
Configure(workDir string, args *process.Arguments) error
// Start runs this authenticator. Will be called just before API server start.
//
// Must be called after Configure.
Start() error
// AddUser provisions a user, returning a copy of the given base rest.Config
// configured to authenticate as that user.
//
// May only be called while the authenticator is "running".
AddUser(user User, baseCfg *rest.Config) (*rest.Config, error)
// Stop shuts down this authenticator.
Stop() error
}
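// A minimal sketch of driving an Authn implementation by hand, using CertAuthn
// purely as the concrete example (workDir is assumed to be a writable temporary
// directory; errors are elided). In practice the APIServer wires this up for
// you via SecureServing.Authn:
//
//	authn, _ := NewCertAuthn()
//	args := process.EmptyArguments()
//	_ = authn.Configure(workDir, args)                          // 1. Configure
//	_ = authn.Start()                                           // 2. Start
//	cfg, _ := authn.AddUser(User{Name: "jane"}, &rest.Config{}) // 3. AddUsers
//	_ = authn.Stop()                                            // 4. Stop
//	_ = cfg // rest.Config that authenticates as "jane"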
// CertAuthn is an authenticator (Authn) that makes use of client certificate authn.
type CertAuthn struct {
// ca is the CA used to sign the client certs
ca *certs.TinyCA
// certDir is the directory used to write the CA crt file
// so that the API server can read it.
certDir string
}
// NewCertAuthn creates a new client-cert-based Authn with a new CA.
func NewCertAuthn() (*CertAuthn, error) {
ca, err := certs.NewTinyCA()
if err != nil {
return nil, fmt.Errorf("unable to provision client certificate auth CA: %w", err)
}
return &CertAuthn{
ca: ca,
}, nil
}
// AddUser provisions a new user that's authenticated via certificates, with
// the given username and groups embedded in the certificate as expected by the
// API server.
func (c *CertAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) {
certs, err := c.ca.NewClientCert(certs.ClientInfo{
Name: user.Name,
Groups: user.Groups,
})
if err != nil {
return nil, fmt.Errorf("unable to create client certificates for %s: %w", user.Name, err)
}
crt, key, err := certs.AsBytes()
if err != nil {
return nil, fmt.Errorf("unable to serialize client certificates for %s: %w", user.Name, err)
}
cfg := rest.CopyConfig(baseCfg)
cfg.CertData = crt
cfg.KeyData = key
return cfg, nil
}
// caCrtPath returns the path to the on-disk client-cert CA crt file.
func (c *CertAuthn) caCrtPath() string {
return filepath.Join(c.certDir, "client-cert-auth-ca.crt")
}
// Configure provides the working directory to this authenticator,
// and configures the given API server arguments to make use of this authenticator.
func (c *CertAuthn) Configure(workDir string, args *process.Arguments) error {
c.certDir = workDir
args.Set("client-ca-file", c.caCrtPath())
return nil
}
// Start runs this authenticator. Will be called just before API server start.
//
// Must be called after Configure.
func (c *CertAuthn) Start() error {
if len(c.certDir) == 0 {
return fmt.Errorf("start called before configure")
}
caCrt := c.ca.CA.CertBytes()
if err := os.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { //nolint:gosec
return fmt.Errorf("unable to save the client certificate CA to %s: %w", c.caCrtPath(), err)
}
return nil
}
// Stop shuts down this authenticator.
func (c *CertAuthn) Stop() error {
// no-op -- our workdir is cleaned up for us automatically
return nil
}

View File

@@ -0,0 +1,202 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"io"
"net"
"net/url"
"strconv"
"time"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
)
// Etcd knows how to run an etcd server.
type Etcd struct {
// URL is the address the Etcd should listen on for client connections.
//
// If this is not specified, we default to a random free port on localhost.
URL *url.URL
// Path is the path to the etcd binary.
//
// If this is left as the empty string, we will attempt to locate a binary,
// by checking for the TEST_ASSET_ETCD environment variable, and the default
// test assets directory. See the "Binaries" section above (in doc.go) for
// details.
Path string
// Args is a list of arguments which will be passed to the Etcd binary. Before
// they are passed on, they will be evaluated as go-template strings. This
// means you can use fields which are defined and exported on this Etcd
// struct (e.g. "--data-dir={{ .Dir }}").
// Those templates will be evaluated after the defaulting of the Etcd's
// fields has already happened and just before the binary actually gets
// started. Thus you have access to calculated fields like `URL` and others.
//
// If not specified, the minimal set of arguments to run the Etcd will be
// used.
//
// They will be loaded into the same argument set as Configure. Each flag
// will be Append-ed to the configured arguments just before launch.
//
// Deprecated: use Configure instead.
Args []string
// DataDir is a path to a directory in which etcd can store its state.
//
// If left unspecified, then the Start() method will create a fresh temporary
// directory, and the Stop() method will clean it up.
DataDir string
// StartTimeout, StopTimeout specify the time the Etcd is allowed to
// take when starting and stopping before an error is emitted.
//
// If not specified, these default to 20 seconds.
StartTimeout time.Duration
StopTimeout time.Duration
// Out, Err specify where Etcd should write its StdOut, StdErr to.
//
// If not specified, the output will be discarded.
Out io.Writer
Err io.Writer
// processState contains the actual details about this running process
processState *process.State
// args contains the structured arguments to use for running etcd.
// Lazily initialized by .Configure(); defaulted eventually with .defaultArgs()
args *process.Arguments
// listenPeerURL is the address the Etcd should listen on for peer connections.
// It's automatically generated and a random port is picked during execution.
listenPeerURL *url.URL
}
// Start starts the etcd, waits for it to come up, and returns an error, if one
// occurred.
func (e *Etcd) Start() error {
if err := e.setProcessState(); err != nil {
return err
}
return e.processState.Start(e.Out, e.Err)
}
func (e *Etcd) setProcessState() error {
e.processState = &process.State{
Dir: e.DataDir,
Path: e.Path,
StartTimeout: e.StartTimeout,
StopTimeout: e.StopTimeout,
}
// unconditionally re-set this so we can successfully restart
// TODO(directxman12): we supported this in the past, but do we actually
// want to support re-using an etcd object to restart? The loss of
// existing data is surprising to say the least.
if err := e.processState.Init("etcd"); err != nil {
return err
}
// Set the listen url.
if e.URL == nil {
port, host, err := addr.Suggest("")
if err != nil {
return err
}
e.URL = &url.URL{
Scheme: "http",
Host: net.JoinHostPort(host, strconv.Itoa(port)),
}
}
// Set the listen peer URL.
{
port, host, err := addr.Suggest("")
if err != nil {
return err
}
e.listenPeerURL = &url.URL{
Scheme: "http",
Host: net.JoinHostPort(host, strconv.Itoa(port)),
}
}
// can use /health as of etcd 3.3.0
e.processState.HealthCheck.URL = *e.URL
e.processState.HealthCheck.Path = "/health"
e.DataDir = e.processState.Dir
e.Path = e.processState.Path
e.StartTimeout = e.processState.StartTimeout
e.StopTimeout = e.processState.StopTimeout
var err error
e.processState.Args, e.Args, err = process.TemplateAndArguments(e.Args, e.Configure(), process.TemplateDefaults{ //nolint:staticcheck
Data: e,
Defaults: e.defaultArgs(),
})
return err
}
// Stop stops this process gracefully, waits for its termination, and cleans up
// the DataDir if necessary.
func (e *Etcd) Stop() error {
if e.processState.DirNeedsCleaning {
e.DataDir = "" // reset the directory if it was randomly allocated, so that we can safely restart
}
return e.processState.Stop()
}
func (e *Etcd) defaultArgs() map[string][]string {
args := map[string][]string{
"listen-peer-urls": {e.listenPeerURL.String()},
"data-dir": {e.DataDir},
}
if e.URL != nil {
args["advertise-client-urls"] = []string{e.URL.String()}
args["listen-client-urls"] = []string{e.URL.String()}
}
// Add unsafe no fsync, available from etcd 3.5
if ok, _ := e.processState.CheckFlag("unsafe-no-fsync"); ok {
args["unsafe-no-fsync"] = []string{"true"}
}
return args
}
// Configure returns Arguments that may be used to customize the
// flags used to launch etcd. A set of defaults will
// be applied underneath.
func (e *Etcd) Configure() *process.Arguments {
if e.args == nil {
e.args = process.EmptyArguments()
}
return e.args
}
// EtcdDefaultArgs exposes the default args for Etcd so that you
// can use those to append your own additional arguments.
var EtcdDefaultArgs = []string{
"--listen-peer-urls=http://localhost:0",
"--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}",
"--listen-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}",
"--data-dir={{ .DataDir }}",
}
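// A short usage sketch (quota-backend-bytes is just an example of an etcd flag
// you might want to override; errors are elided):
//
//	etcd := &Etcd{}
//	etcd.Configure().Set("quota-backend-bytes", "4294967296")
//	_ = etcd.Start()
//	defer etcd.Stop()
//	// etcd.URL now holds the client endpoint chosen for this run, which is
//	// what the APIServer's EtcdURL field expects.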

View File

@@ -0,0 +1,119 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"bytes"
"fmt"
"io"
"net/url"
"os/exec"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
kcapi "k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
)
const (
envtestName = "envtest"
)
// KubeConfigFromREST reverse-engineers a kubeconfig file from a rest.Config.
// The options are tailored towards the rest.Configs we generate, so they're
// not broadly applicable.
//
// This is not intended to be exposed beyond internal for the above reasons.
func KubeConfigFromREST(cfg *rest.Config) ([]byte, error) {
kubeConfig := kcapi.NewConfig()
protocol := "https"
if !rest.IsConfigTransportTLS(*cfg) {
protocol = "http"
}
// cfg.Host is a URL, so we need to parse it so we can properly append the API path
baseURL, err := url.Parse(cfg.Host)
if err != nil {
return nil, fmt.Errorf("unable to interpret config's host value as a URL: %w", err)
}
kubeConfig.Clusters[envtestName] = &kcapi.Cluster{
// TODO(directxman12): if client-go ever decides to expose defaultServerUrlFor(config),
// we can just use that. Note that this is not the same as the public DefaultServerURL,
// which requires us to pass a bunch of stuff in manually.
Server: (&url.URL{Scheme: protocol, Host: baseURL.Host, Path: cfg.APIPath}).String(),
CertificateAuthorityData: cfg.CAData,
}
kubeConfig.AuthInfos[envtestName] = &kcapi.AuthInfo{
// try to cover all auth strategies that aren't plugins
ClientCertificateData: cfg.CertData,
ClientKeyData: cfg.KeyData,
Token: cfg.BearerToken,
Username: cfg.Username,
Password: cfg.Password,
}
kcCtx := kcapi.NewContext()
kcCtx.Cluster = envtestName
kcCtx.AuthInfo = envtestName
kubeConfig.Contexts[envtestName] = kcCtx
kubeConfig.CurrentContext = envtestName
contents, err := clientcmd.Write(*kubeConfig)
if err != nil {
return nil, fmt.Errorf("unable to serialize kubeconfig file: %w", err)
}
return contents, nil
}
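// A minimal sketch of dumping such a kubeconfig to disk for debugging with
// external tooling (the path is illustrative; os.WriteFile is from the
// standard library):
//
//	contents, err := KubeConfigFromREST(cfg)
//	if err == nil {
//		err = os.WriteFile("/tmp/envtest.kubeconfig", contents, 0600)
//	}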
// KubeCtl is a wrapper around the kubectl binary.
type KubeCtl struct {
// Path where the kubectl binary can be found.
//
// If this is left empty, we will attempt to locate a binary, by checking for
// the TEST_ASSET_KUBECTL environment variable, and the default test assets
// directory. See the "Binaries" section above (in doc.go) for details.
Path string
// Opts can be used to configure additional flags which will be used each
// time the wrapped binary is called.
//
// For example, you might want to use this to set the URL of the APIServer to
// connect to.
Opts []string
}
// Run executes the wrapped binary with some preconfigured options and the
// arguments given to this method. It returns Readers for the stdout and
// stderr.
func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) {
if k.Path == "" {
k.Path = process.BinPathFinder("kubectl", "")
}
stdoutBuffer := &bytes.Buffer{}
stderrBuffer := &bytes.Buffer{}
allArgs := append(k.Opts, args...)
cmd := exec.Command(k.Path, allArgs...)
cmd.Stdout = stdoutBuffer
cmd.Stderr = stderrBuffer
err = cmd.Run()
return stdoutBuffer, stderrBuffer, err
}
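// Usage sketch (the kubeconfig flag is normally injected for you by
// AuthenticatedUser.Kubectl; it is spelled out here only for illustration):
//
//	k := &KubeCtl{Opts: []string{"--kubeconfig=/tmp/envtest.kubeconfig"}}
//	stdout, stderr, err := k.Run("get", "pods", "--all-namespaces")
//	if err != nil {
//		_, _ = io.Copy(os.Stderr, stderr)
//	}
//	_, _ = io.Copy(os.Stdout, stdout)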

View File

@@ -0,0 +1,259 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
"net/url"
"os"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
)
// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY.
// Don't use this for anything else!
var NewTinyCA = certs.NewTinyCA
// ControlPlane is a struct that knows how to start your test control plane.
//
// Right now, that means Etcd and your APIServer. This is likely to increase in
// future.
type ControlPlane struct {
APIServer *APIServer
Etcd *Etcd
// KubectlPath will override the default asset search path for kubectl
KubectlPath string
// for the deprecated methods (Kubectl, etc)
defaultUserCfg *rest.Config
defaultUserKubectl *KubeCtl
}
// Start will start your control plane processes. To stop them, call Stop().
func (f *ControlPlane) Start() (retErr error) {
if f.Etcd == nil {
f.Etcd = &Etcd{}
}
if err := f.Etcd.Start(); err != nil {
return err
}
defer func() {
if retErr != nil {
_ = f.Etcd.Stop()
}
}()
if f.APIServer == nil {
f.APIServer = &APIServer{}
}
f.APIServer.EtcdURL = f.Etcd.URL
if err := f.APIServer.Start(); err != nil {
return err
}
defer func() {
if retErr != nil {
_ = f.APIServer.Stop()
}
}()
// provision the default user -- can be removed when the related
// methods are removed. The default user has admin permissions to
// mimic legacy no-authz setups.
user, err := f.AddUser(User{Name: "default", Groups: []string{"system:masters"}}, &rest.Config{})
if err != nil {
return fmt.Errorf("unable to provision the default (legacy) user: %w", err)
}
kubectl, err := user.Kubectl()
if err != nil {
return fmt.Errorf("unable to provision the default (legacy) kubeconfig: %w", err)
}
f.defaultUserCfg = user.Config()
f.defaultUserKubectl = kubectl
return nil
}
// Stop will stop your control plane processes, and clean up their data.
func (f *ControlPlane) Stop() error {
var errList []error
if f.APIServer != nil {
if err := f.APIServer.Stop(); err != nil {
errList = append(errList, err)
}
}
if f.Etcd != nil {
if err := f.Etcd.Stop(); err != nil {
errList = append(errList, err)
}
}
return kerrors.NewAggregate(errList)
}
// APIURL returns the URL you should connect to in order to talk to your API server.
//
// If insecure serving is configured, this will contain the insecure port.
// Otherwise, it will contain the secure port.
//
// Deprecated: use AddUser instead, or APIServer.{Ins|S}ecureServing.URL if
// you really want just the URL.
func (f *ControlPlane) APIURL() *url.URL {
return f.APIServer.URL
}
// KubeCtl returns a pre-configured KubeCtl, ready to connect to this
// ControlPlane.
//
// Deprecated: use AddUser & AuthenticatedUser.Kubectl instead.
func (f *ControlPlane) KubeCtl() *KubeCtl {
return f.defaultUserKubectl
}
// RESTClientConfig returns a pre-configured restconfig, ready to connect to
// this ControlPlane.
//
// Deprecated: use AddUser & AuthenticatedUser.Config instead.
func (f *ControlPlane) RESTClientConfig() (*rest.Config, error) {
return f.defaultUserCfg, nil
}
// AuthenticatedUser contains access information for a provisioned user,
// including REST config, kubeconfig contents, and access to a KubeCtl instance.
//
// It's not "safe" to use the methods on this till after the API server has been
// started (due to certificate initialization and such). The various methods will
// panic if this is done.
type AuthenticatedUser struct {
// cfg is the rest.Config for connecting to the API server. It's lazily initialized.
cfg *rest.Config
// cfgIsComplete indicates the cfg has had late-initialized fields (e.g.
// API server CA data) initialized.
cfgIsComplete bool
// plane is a handle to the ControlPlane that's used when finalizing cfg
// and producing the kubectl instance.
plane *ControlPlane
// kubectl is our existing, provisioned kubectl. We don't provision one
// till someone actually asks for it.
kubectl *KubeCtl
}
// Config returns the REST config that can be used to connect to the API server
// as this user.
//
// Will panic if used before the API server is started.
func (u *AuthenticatedUser) Config() *rest.Config {
// NB(directxman12): we choose to panic here for ergonomics sake, and because there's
// not really much you can do to "handle" this error. This machinery is intended to be
// used in tests anyway, so panicking is not a particularly big deal.
if u.cfgIsComplete {
return u.cfg
}
if len(u.plane.APIServer.SecureServing.CA) == 0 {
panic("the API server has not yet been started, please do that before accessing connection details")
}
u.cfg.CAData = u.plane.APIServer.SecureServing.CA
u.cfg.Host = u.plane.APIServer.SecureServing.URL("https", "/").String()
u.cfgIsComplete = true
return u.cfg
}
// KubeConfig returns a KubeConfig that's roughly equivalent to this user's REST config.
//
// Will panic if used before the API server is started.
func (u AuthenticatedUser) KubeConfig() ([]byte, error) {
// NB(directxman12): we don't return the actual API object to avoid yet another
// piece of kubernetes API in our public API, and also because generally the thing
// you want to do with this is just write it out to a file for external debugging
// purposes, etc.
return KubeConfigFromREST(u.Config())
}
// Kubectl returns a KubeCtl instance for talking to the API server as this user. It uses
// a kubeconfig equivalent to that returned by .KubeConfig.
//
// Will panic if used before the API server is started.
func (u *AuthenticatedUser) Kubectl() (*KubeCtl, error) {
if u.kubectl != nil {
return u.kubectl, nil
}
if len(u.plane.APIServer.CertDir) == 0 {
panic("the API server has not yet been started, please do that before accessing connection details")
}
// cleaning this up is handled when our tmpDir is deleted
out, err := os.CreateTemp(u.plane.APIServer.CertDir, "*.kubecfg")
if err != nil {
return nil, fmt.Errorf("unable to create file for kubeconfig: %w", err)
}
defer out.Close()
contents, err := KubeConfigFromREST(u.Config())
if err != nil {
return nil, err
}
if _, err := out.Write(contents); err != nil {
return nil, fmt.Errorf("unable to write kubeconfig to disk at %s: %w", out.Name(), err)
}
k := &KubeCtl{
Path: u.plane.KubectlPath,
}
k.Opts = append(k.Opts, fmt.Sprintf("--kubeconfig=%s", out.Name()))
u.kubectl = k
return k, nil
}
// AddUser provisions a new user in the cluster. It uses the APIServer's authentication
// strategy -- see APIServer.SecureServing.Authn.
//
// Unlike Authn.AddUser, it's safe to pass a nil rest.Config here if you have no
// particular opinions about the config.
//
// The default authentication strategy is not guaranteed to be any specific strategy, but it is
// guaranteed to be callable both before and after Start has been called (but, as noted in the
// AuthenticatedUser docs, the given user objects are only valid after Start has been called).
func (f *ControlPlane) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) {
if f.GetAPIServer().SecureServing.Authn == nil {
return nil, fmt.Errorf("no API server authentication is configured yet. The API server defaults one when Start is called, did you mean to use that?")
}
if baseConfig == nil {
baseConfig = &rest.Config{}
}
cfg, err := f.GetAPIServer().SecureServing.AddUser(user, baseConfig)
if err != nil {
return nil, err
}
return &AuthenticatedUser{
cfg: cfg,
plane: f,
}, nil
}
// GetAPIServer returns this ControlPlane's APIServer, initializing it if necessary.
func (f *ControlPlane) GetAPIServer() *APIServer {
if f.APIServer == nil {
f.APIServer = &APIServer{}
}
return f.APIServer
}
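// A rough end-to-end sketch of using ControlPlane (error handling elided; the
// user name and group are illustrative):
//
//	plane := &ControlPlane{}
//	_ = plane.Start()
//	defer plane.Stop()
//	user, _ := plane.AddUser(User{Name: "jane", Groups: []string{"system:masters"}}, nil)
//	cfg := user.Config()         // *rest.Config authenticated as "jane"
//	kubectl, _ := user.Kubectl() // kubectl pre-wired with a matching kubeconfig
//	_, _, _ = kubectl.Run("get", "namespaces")
//	_ = cfg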

View File

@@ -0,0 +1,340 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package process
import (
"bytes"
"html/template"
"sort"
"strings"
)
// RenderTemplates renders the given argument templates against data and returns the resulting []string
//
// Deprecated: will be removed in favor of Arguments.
func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) {
var t *template.Template
for _, arg := range argTemplates {
t, err = template.New(arg).Parse(arg)
if err != nil {
args = nil
return
}
buf := &bytes.Buffer{}
err = t.Execute(buf, data)
if err != nil {
args = nil
return
}
args = append(args, buf.String())
}
return
}
// SliceToArguments converts a slice of arguments to structured arguments,
// appending each argument that starts with `--` and contains an `=` to the
// argument set (ignoring defaults), returning the rest.
//
// Deprecated: will be removed when RenderTemplates is removed.
func SliceToArguments(sliceArgs []string, args *Arguments) []string {
var rest []string
for i, arg := range sliceArgs {
if arg == "--" {
rest = append(rest, sliceArgs[i:]...)
return rest
}
// skip non-flag arguments, skip arguments w/o equals because we
// can't tell if the next argument should take a value
if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") {
rest = append(rest, arg)
continue
}
parts := strings.SplitN(arg[2:], "=", 2)
name := parts[0]
val := parts[1]
args.AppendNoDefaults(name, val)
}
return rest
}
// TemplateDefaults specifies defaults to be used for joining structured arguments with templates.
//
// Deprecated: will be removed when RenderTemplates is removed.
type TemplateDefaults struct {
// Data will be used to render the template.
Data interface{}
// Defaults will be used to default structured arguments if no template is passed.
Defaults map[string][]string
// MinimalDefaults will be used to default structured arguments if a template is passed.
// Use this for flags which *must* be present.
MinimalDefaults map[string][]string // for api server service-cluster-ip-range
}
// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing
// behavior. Namely:
//
// 1. if templ has len > 0, it will be rendered against data
// 2. the rendered template values that look like `--foo=bar` will be split
// and appended to args, the rest will be kept around
// 3. the given args will be rendered as string form. If a template is given,
// no defaults will be used, otherwise defaults will be used
// 4. a result of [args..., rest...] will be returned
//
// It returns the resulting rendered arguments, plus the arguments that were
// not transferred to `args` during rendering.
//
// Deprecated: will be removed when RenderTemplates is removed.
func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) {
if len(templ) == 0 { // 3 & 4 (no template case)
return args.AsStrings(data.Defaults), nil, nil
}
// 1: render the template
rendered, err := RenderTemplates(templ, data.Data)
if err != nil {
return nil, nil, err
}
// 2: filter out structured args and add them to args
rest := SliceToArguments(rendered, args)
// 3 (template case): render structured args, no defaults (matching the
// legacy case where if Args was specified, no defaults were used)
res := args.AsStrings(data.MinimalDefaults)
// 4: return the rendered structured args + all non-structured args
return append(res, rest...), rest, nil
}
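// A small sketch of the merge behaviour described above (the CertDir field and
// flag names are illustrative):
//
//	args := EmptyArguments().Append("authorization-mode", "RBAC")
//	templ := []string{"--cert-dir={{ .CertDir }}", "positional-arg"}
//	all, rest, err := TemplateAndArguments(templ, args, TemplateDefaults{
//		Data:            struct{ CertDir string }{CertDir: "/tmp/certs"},
//		MinimalDefaults: map[string][]string{"service-cluster-ip-range": {"10.0.0.0/24"}},
//	})
//	// all is roughly [--authorization-mode=RBAC --cert-dir=/tmp/certs
//	//   --service-cluster-ip-range=10.0.0.0/24 positional-arg], rest is [positional-arg]
//	_, _, _ = all, rest, err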
// EmptyArguments constructs an empty set of flags with no defaults.
func EmptyArguments() *Arguments {
return &Arguments{
values: make(map[string]Arg),
}
}
// Arguments are structured, overridable arguments.
// Each Arguments object contains some set of default arguments, which may
// be appended to, or overridden.
//
// When ready, you can serialize them to pass to exec.Command and friends using
// AsStrings.
//
// All flag-setting methods return the *same* instance of Arguments so that you
// can chain calls.
type Arguments struct {
// values contains the user-set values for the arguments.
// `values[key] = dontPass` means "don't pass this flag"
// `values[key] = passAsName` means "pass this flag without args like --key`
// `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c`
// any values not explicitly set here will be copied from defaults on final rendering.
values map[string]Arg
}
// Arg is an argument that has one or more values,
// and optionally falls back to default values.
type Arg interface {
// Append adds new values to this argument, returning
// a new instance contain the new value. The intermediate
// argument should generally be assumed to be consumed.
Append(vals ...string) Arg
// Get returns the full set of values, optionally including
// the passed in defaults. If it returns nil, this will be
// skipped. If it returns a non-nil empty slice, it'll be
// assumed that the argument should be passed as name-only.
Get(defaults []string) []string
}
type userArg []string
func (a userArg) Append(vals ...string) Arg {
return userArg(append(a, vals...)) //nolint:unconvert
}
func (a userArg) Get(_ []string) []string {
return []string(a)
}
type defaultedArg []string
func (a defaultedArg) Append(vals ...string) Arg {
return defaultedArg(append(a, vals...)) //nolint:unconvert
}
func (a defaultedArg) Get(defaults []string) []string {
res := append([]string(nil), defaults...)
return append(res, a...)
}
type dontPassArg struct{}
func (a dontPassArg) Append(vals ...string) Arg {
return userArg(vals)
}
func (dontPassArg) Get(_ []string) []string {
return nil
}
type passAsNameArg struct{}
func (a passAsNameArg) Append(_ ...string) Arg {
return passAsNameArg{}
}
func (passAsNameArg) Get(_ []string) []string {
return []string{}
}
var (
// DontPass indicates that the given argument will not actually be
// rendered.
DontPass Arg = dontPassArg{}
// PassAsName indicates that the given flag will be passed as `--key`
// without any value.
PassAsName Arg = passAsNameArg{}
)
// AsStrings serializes this set of arguments to a slice of strings appropriate
// for passing to exec.Command and friends, making use of the given defaults
// as indicated for each particular argument.
//
// - Any flag in defaults that's not in Arguments will be present in the output
// - Any flag that's present in Arguments will be passed the corresponding
// defaults to do with as it will (ignore, append-to, suppress, etc).
func (a *Arguments) AsStrings(defaults map[string][]string) []string {
// sort for deterministic ordering
keysInOrder := make([]string, 0, len(defaults)+len(a.values))
for key := range defaults {
if _, userSet := a.values[key]; userSet {
continue
}
keysInOrder = append(keysInOrder, key)
}
for key := range a.values {
keysInOrder = append(keysInOrder, key)
}
sort.Strings(keysInOrder)
var res []string
for _, key := range keysInOrder {
vals := a.Get(key).Get(defaults[key])
switch {
case vals == nil: // don't pass
continue
case len(vals) == 0: // pass as name
res = append(res, "--"+key)
default:
for _, val := range vals {
res = append(res, "--"+key+"="+val)
}
}
}
return res
}
// Get returns the value of the given flag. If nil,
// it will not be passed in AsStrings; otherwise:
//
// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`.
func (a *Arguments) Get(key string) Arg {
if vals, ok := a.values[key]; ok {
return vals
}
return defaultedArg(nil)
}
// Enable configures the given key to be passed as a "name-only" flag,
// like, `--key`.
func (a *Arguments) Enable(key string) *Arguments {
a.values[key] = PassAsName
return a
}
// Disable prevents this flag from being passed.
func (a *Arguments) Disable(key string) *Arguments {
a.values[key] = DontPass
return a
}
// Append adds additional values to this flag. If this flag has
// yet to be set, initial values will include defaults. If you want
// to intentionally ignore defaults/start from scratch, call AppendNoDefaults.
//
// Multiple values will look like `--key=value1 --key=value2 ...`.
func (a *Arguments) Append(key string, values ...string) *Arguments {
vals, present := a.values[key]
if !present {
vals = defaultedArg{}
}
a.values[key] = vals.Append(values...)
return a
}
// AppendNoDefaults adds additional values to this flag. However,
// unlike Append, it will *not* copy values from defaults.
func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments {
vals, present := a.values[key]
if !present {
vals = userArg{}
}
a.values[key] = vals.Append(values...)
return a
}
// Set resets the given flag to the specified values, ignoring any existing
// values or defaults.
func (a *Arguments) Set(key string, values ...string) *Arguments {
a.values[key] = userArg(values)
return a
}
// SetRaw sets the given flag to the given Arg value directly. Use this if
// you need to do some complicated deferred logic or something.
//
// Otherwise behaves like Set.
func (a *Arguments) SetRaw(key string, val Arg) *Arguments {
a.values[key] = val
return a
}
// FuncArg is a basic implementation of Arg that can be used for custom argument logic,
// like pulling values out of APIServer, or dynamically calculating values just before
// launch.
//
// The given function will be mapped directly to Arg#Get, and will generally be
// used in conjunction with SetRaw. For example, to set `--some-flag` to the
// API server's CertDir, you could do:
//
// server.Configure().SetRaw("some-flag", FuncArg(func(defaults []string) []string {
// return []string{server.CertDir}
// }))
//
// FuncArg ignores Appends; if you need to support appending values too, consider implementing
// Arg directly.
type FuncArg func([]string) []string
// Append is a no-op for FuncArg, and just returns itself.
func (a FuncArg) Append(vals ...string) Arg { return a }
// Get delegates functionality to the FuncArg function itself.
func (a FuncArg) Get(defaults []string) []string {
return a(defaults)
}
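// A compact sketch of how the flag-setting methods interact with defaults when
// rendered by AsStrings (the flag names are illustrative):
//
//	defaults := map[string][]string{"bind-address": {"127.0.0.1"}, "profiling": {"true"}}
//	args := EmptyArguments().
//		Append("bind-address", "0.0.0.0"). // keeps the default, then appends
//		Set("profiling", "false").         // replaces the default entirely
//		Enable("anonymous-auth").          // rendered as just --anonymous-auth
//		Disable("insecure-port")           // not rendered at all
//	_ = args.AsStrings(defaults)
//	// yields, in sorted key order:
//	//   --anonymous-auth --bind-address=127.0.0.1 --bind-address=0.0.0.0 --profiling=false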

View File

@@ -0,0 +1,70 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package process
import (
"os"
"path/filepath"
"regexp"
"strings"
)
const (
// EnvAssetsPath is the environment variable that stores the global test
// binary location override.
EnvAssetsPath = "KUBEBUILDER_ASSETS"
// EnvAssetOverridePrefix is the environment variable prefix for per-binary
// location overrides.
EnvAssetOverridePrefix = "TEST_ASSET_"
// AssetsDefaultPath is the default location to look for test binaries in,
// if no override was provided.
AssetsDefaultPath = "/usr/local/kubebuilder/bin"
)
// BinPathFinder finds the path to the given named binary, using the following locations
// in order of precedence (highest first). Notice that the various env vars only need
// to be set -- the asset is not checked for existence on the filesystem.
//
// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix)
// 2. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath)
// 3. assetDirectory (if set; per-config asset directory)
// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath).
func BinPathFinder(symbolicName, assetDirectory string) (binPath string) {
punctuationPattern := regexp.MustCompile("[^A-Z0-9]+")
sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_")
leadingNumberPattern := regexp.MustCompile("^[0-9]+")
sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "")
envVar := EnvAssetOverridePrefix + sanitizedName
// TEST_ASSET_XYZ
if val, ok := os.LookupEnv(envVar); ok {
return val
}
// KUBEBUILDER_ASSETS
if val, ok := os.LookupEnv(EnvAssetsPath); ok {
return filepath.Join(val, symbolicName)
}
// assetDirectory
if assetDirectory != "" {
return filepath.Join(assetDirectory, symbolicName)
}
// default path
return filepath.Join(AssetsDefaultPath, symbolicName)
}
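// A quick sketch of that precedence (the paths are illustrative):
//
//	os.Setenv("TEST_ASSET_ETCD", "/opt/bin/etcd")
//	BinPathFinder("etcd", "/custom/assets") // "/opt/bin/etcd" (per-binary override)
//	os.Unsetenv("TEST_ASSET_ETCD")
//	os.Setenv("KUBEBUILDER_ASSETS", "/kb/bin")
//	BinPathFinder("etcd", "/custom/assets") // "/kb/bin/etcd"
//	os.Unsetenv("KUBEBUILDER_ASSETS")
//	BinPathFinder("etcd", "/custom/assets") // "/custom/assets/etcd"
//	BinPathFinder("etcd", "")               // "/usr/local/kubebuilder/bin/etcd"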

View File

@@ -0,0 +1,272 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package process
import (
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"regexp"
"sync"
"syscall"
"time"
)
// ListenAddr represents some listening address and port.
type ListenAddr struct {
Address string
Port string
}
// URL returns a URL for this address with the given scheme and subpath.
func (l *ListenAddr) URL(scheme string, path string) *url.URL {
return &url.URL{
Scheme: scheme,
Host: l.HostPort(),
Path: path,
}
}
// HostPort returns the joined host-port pair for this address.
func (l *ListenAddr) HostPort() string {
return net.JoinHostPort(l.Address, l.Port)
}
// HealthCheck describes the information needed to health-check a process via
// some health-check URL.
type HealthCheck struct {
url.URL
// PollInterval is the interval which will be used for polling the
// endpoint described by Host, Port, and Path.
//
// If left empty it will default to 100 milliseconds.
PollInterval time.Duration
}
// State define the state of the process.
type State struct {
Cmd *exec.Cmd
// HealthCheck describes how to check if this process is up. If we get an http.StatusOK,
// we assume the process is ready to operate.
//
// For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd.
HealthCheck HealthCheck
Args []string
StopTimeout time.Duration
StartTimeout time.Duration
Dir string
DirNeedsCleaning bool
Path string
// ready holds whether the process is currently in ready state (hit the ready condition) or not.
// It will be set to true on a successful `Start()` and set to false on a successful `Stop()`
ready bool
// waitDone is closed when our call to wait finishes up, and indicates that
// our process has terminated.
waitDone chan struct{}
errMu sync.Mutex
exitErr error
exited bool
}
// Init sets up this process, configuring binary paths if missing, initializing
// temporary directories, etc.
//
// This defaults all defaultable fields.
func (ps *State) Init(name string) error {
if ps.Path == "" {
if name == "" {
return fmt.Errorf("must have at least one of name or path")
}
ps.Path = BinPathFinder(name, "")
}
if ps.Dir == "" {
newDir, err := os.MkdirTemp("", "k8s_test_framework_")
if err != nil {
return err
}
ps.Dir = newDir
ps.DirNeedsCleaning = true
}
if ps.StartTimeout == 0 {
ps.StartTimeout = 20 * time.Second
}
if ps.StopTimeout == 0 {
ps.StopTimeout = 20 * time.Second
}
return nil
}
type stopChannel chan struct{}
// CheckFlag checks the help output of this command for the presence of the given flag, specified
// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`),
// returning true if the flag is present.
func (ps *State) CheckFlag(flag string) (bool, error) {
cmd := exec.Command(ps.Path, "--help")
outContents, err := cmd.CombinedOutput()
if err != nil {
return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err)
}
pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line)
matched, err := regexp.Match(pat, outContents)
if err != nil {
return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err)
}
return matched, nil
}
// Start starts the process, waits for it to come up, and returns an error
// if one occurred.
func (ps *State) Start(stdout, stderr io.Writer) (err error) {
if ps.ready {
return nil
}
ps.Cmd = exec.Command(ps.Path, ps.Args...)
ps.Cmd.Stdout = stdout
ps.Cmd.Stderr = stderr
ready := make(chan bool)
timedOut := time.After(ps.StartTimeout)
pollerStopCh := make(stopChannel)
go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ready, pollerStopCh)
ps.waitDone = make(chan struct{})
if err := ps.Cmd.Start(); err != nil {
ps.errMu.Lock()
defer ps.errMu.Unlock()
ps.exited = true
return err
}
go func() {
defer close(ps.waitDone)
err := ps.Cmd.Wait()
ps.errMu.Lock()
defer ps.errMu.Unlock()
ps.exitErr = err
ps.exited = true
}()
select {
case <-ready:
ps.ready = true
return nil
case <-ps.waitDone:
close(pollerStopCh)
return fmt.Errorf("timeout waiting for process %s to start successfully "+
"(it may have failed to start, or stopped unexpectedly before becoming ready)",
path.Base(ps.Path))
case <-timedOut:
close(pollerStopCh)
if ps.Cmd != nil {
// intentionally ignore this -- we might've crashed, failed to start, etc
ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck
}
return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path))
}
}
// Exited returns true if the process exited, and may also
// return an error (as per Cmd.Wait) if the process did not
// exit with error code 0.
func (ps *State) Exited() (bool, error) {
ps.errMu.Lock()
defer ps.errMu.Unlock()
return ps.exited, ps.exitErr
}
func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh stopChannel) {
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
// there's probably certs *somewhere*,
// but it's fine to just skip validating
// them for health checks during testing
InsecureSkipVerify: true, //nolint:gosec
},
},
}
if interval <= 0 {
interval = 100 * time.Millisecond
}
for {
res, err := client.Get(url.String())
if err == nil {
res.Body.Close()
if res.StatusCode == http.StatusOK {
ready <- true
return
}
}
select {
case <-stopCh:
return
default:
time.Sleep(interval)
}
}
}
// Stop stops this process gracefully, waits for its termination, and cleans up
// the temporary working directory if necessary.
func (ps *State) Stop() error {
// Always clear the directory if we need to.
defer func() {
if ps.DirNeedsCleaning {
_ = os.RemoveAll(ps.Dir)
}
}()
if ps.Cmd == nil {
return nil
}
if done, _ := ps.Exited(); done {
return nil
}
if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil {
return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err)
}
timedOut := time.After(ps.StopTimeout)
select {
case <-ps.waitDone:
break
case <-timedOut:
return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path))
}
ps.ready = false
return nil
}
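// A bare-bones lifecycle sketch (the binary name, arguments, and health-check
// address are illustrative; callers such as the etcd and API server wrappers
// fill these in from freshly allocated ports):
//
//	ps := &State{}
//	_ = ps.Init("etcd")
//	ps.Args = []string{"--data-dir=" + ps.Dir}
//	ps.HealthCheck.URL = url.URL{Scheme: "http", Host: "127.0.0.1:2379"}
//	ps.HealthCheck.Path = "/health"
//	_ = ps.Start(os.Stdout, os.Stderr)
//	defer ps.Stop()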

View File

@@ -0,0 +1,24 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package leaderelection contains a constructor for a leader election resource lock.
This is used to ensure that multiple copies of a controller manager can be run with
only one active set of controllers, for active-passive HA.
It uses built-in Kubernetes leader election APIs.
*/
package leaderelection

View File

@@ -0,0 +1,127 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"errors"
"fmt"
"os"
"k8s.io/apimachinery/pkg/util/uuid"
coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"sigs.k8s.io/controller-runtime/pkg/recorder"
)
const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
// Options provides the required configuration to create a new resource lock.
type Options struct {
// LeaderElection determines whether or not to use leader election when
// starting the manager.
LeaderElection bool
// LeaderElectionResourceLock determines which resource lock to use for leader election,
// defaults to "leases".
LeaderElectionResourceLock string
// LeaderElectionNamespace determines the namespace in which the leader
// election resource will be created.
LeaderElectionNamespace string
// LeaderElectionID determines the name of the resource that leader election
// will use for holding the leader lock.
LeaderElectionID string
}
// NewResourceLock creates a new resource lock for use in a leader election loop.
func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) {
if !options.LeaderElection {
return nil, nil
}
// Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was
// used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning
// five minor releases, any actively maintained operators are very likely to have a released version that uses
// "configmapsleases". Therefore defaulting to "leases" should be safe.
if options.LeaderElectionResourceLock == "" {
options.LeaderElectionResourceLock = resourcelock.LeasesResourceLock
}
// LeaderElectionID must be provided to prevent clashes
if options.LeaderElectionID == "" {
return nil, errors.New("LeaderElectionID must be configured")
}
// Default the namespace (if running in cluster)
if options.LeaderElectionNamespace == "" {
var err error
options.LeaderElectionNamespace, err = getInClusterNamespace()
if err != nil {
return nil, fmt.Errorf("unable to find leader election namespace: %w", err)
}
}
// Leader ID needs to be unique
id, err := os.Hostname()
if err != nil {
return nil, err
}
id = id + "_" + string(uuid.NewUUID())
// Construct clients for leader election
rest.AddUserAgent(config, "leader-election")
corev1Client, err := corev1client.NewForConfig(config)
if err != nil {
return nil, err
}
coordinationClient, err := coordinationv1client.NewForConfig(config)
if err != nil {
return nil, err
}
return resourcelock.New(options.LeaderElectionResourceLock,
options.LeaderElectionNamespace,
options.LeaderElectionID,
corev1Client,
coordinationClient,
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: recorderProvider.GetEventRecorderFor(id),
})
}
func getInClusterNamespace() (string, error) {
// Check whether the namespace file exists.
// If not, we are not running in cluster so can't guess the namespace.
if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) {
return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace")
} else if err != nil {
return "", fmt.Errorf("error checking namespace file: %w", err)
}
// Load the namespace file and return its content
namespace, err := os.ReadFile(inClusterNamespacePath)
if err != nil {
return "", fmt.Errorf("error reading namespace file: %w", err)
}
return string(namespace), nil
}
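// A minimal sketch of wiring this up (cfg is a *rest.Config, recorderProvider
// is assumed to come from the manager's recorder provider, and the lock ID is
// illustrative):
//
//	lock, err := NewResourceLock(cfg, recorderProvider, Options{
//		LeaderElection:          true,
//		LeaderElectionID:        "my-operator-lock",
//		LeaderElectionNamespace: "default", // defaulted from the pod namespace when running in-cluster
//	})
//	if err != nil { /* handle */ }
//	_ = lock // handed to k8s.io/client-go/tools/leaderelection when the manager starts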

199
vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go generated vendored Normal file
View File

@@ -0,0 +1,199 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"sync"
"github.com/go-logr/logr"
)
// loggerPromise knows how to populate a concrete logr.Logger
// with options, given an actual base logger later on down the line.
type loggerPromise struct {
logger *DelegatingLogSink
childPromises []*loggerPromise
promisesLock sync.Mutex
name *string
tags []interface{}
}
func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise {
res := &loggerPromise{
logger: l,
name: &name,
promisesLock: sync.Mutex{},
}
p.promisesLock.Lock()
defer p.promisesLock.Unlock()
p.childPromises = append(p.childPromises, res)
return res
}
// WithValues provides a new Logger with the tags appended.
func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise {
res := &loggerPromise{
logger: l,
tags: tags,
promisesLock: sync.Mutex{},
}
p.promisesLock.Lock()
defer p.promisesLock.Unlock()
p.childPromises = append(p.childPromises, res)
return res
}
// Fulfill instantiates the Logger with the provided logger.
func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) {
sink := parentLogSink
if p.name != nil {
sink = sink.WithName(*p.name)
}
if p.tags != nil {
sink = sink.WithValues(p.tags...)
}
p.logger.lock.Lock()
p.logger.logger = sink
if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
p.logger.logger = withCallDepth.WithCallDepth(1)
}
p.logger.promise = nil
p.logger.lock.Unlock()
for _, childPromise := range p.childPromises {
childPromise.Fulfill(sink)
}
}
// DelegatingLogSink is a logsink that delegates to another logr.LogSink.
// If the underlying promise is not nil, it registers calls to sub-loggers with
// the logging factory to be populated later, and returns a new delegating
// logger. It expects to have *some* logr.Logger set at all times (generally
// a no-op logger before the promises are fulfilled).
type DelegatingLogSink struct {
lock sync.RWMutex
logger logr.LogSink
promise *loggerPromise
info logr.RuntimeInfo
}
// Init implements logr.LogSink.
func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) {
l.lock.Lock()
defer l.lock.Unlock()
l.info = info
}
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info
// logs.
func (l *DelegatingLogSink) Enabled(level int) bool {
l.lock.RLock()
defer l.lock.RUnlock()
return l.logger.Enabled(level)
}
// Info logs a non-error message with the given key/value pairs as context.
//
// The msg argument should be used to add some constant description to
// the log line. The key/value pairs can then be used to add additional
// variable information. The key/value pairs should alternate string
// keys and arbitrary values.
func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) {
l.lock.RLock()
defer l.lock.RUnlock()
l.logger.Info(level, msg, keysAndValues...)
}
// Error logs an error, with the given message and key/value pairs as context.
// It functions similarly to calling Info with the "error" named value, but may
// have unique behavior, and should be preferred for logging errors (see the
// package documentations for more information).
//
// The msg field should be used to add context to any underlying error,
// while the err field should be used to attach the actual error that
// triggered this log line, if present.
func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) {
l.lock.RLock()
defer l.lock.RUnlock()
l.logger.Error(err, msg, keysAndValues...)
}
// WithName provides a new Logger with the name appended.
func (l *DelegatingLogSink) WithName(name string) logr.LogSink {
l.lock.RLock()
defer l.lock.RUnlock()
if l.promise == nil {
sink := l.logger.WithName(name)
if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
sink = withCallDepth.WithCallDepth(-1)
}
return sink
}
res := &DelegatingLogSink{logger: l.logger}
promise := l.promise.WithName(res, name)
res.promise = promise
return res
}
// WithValues provides a new Logger with the tags appended.
func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink {
l.lock.RLock()
defer l.lock.RUnlock()
if l.promise == nil {
sink := l.logger.WithValues(tags...)
if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
sink = withCallDepth.WithCallDepth(-1)
}
return sink
}
res := &DelegatingLogSink{logger: l.logger}
promise := l.promise.WithValues(res, tags...)
res.promise = promise
return res
}
// Fulfill switches the logger over to use the actual logger
// provided, instead of the temporary initial one, if this method
// has not been previously called.
func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) {
if l.promise != nil {
l.promise.Fulfill(actual)
}
}
// NewDelegatingLogSink constructs a new DelegatingLogSink which uses
// the given logger before its promise is fulfilled.
func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink {
l := &DelegatingLogSink{
logger: initial,
promise: &loggerPromise{promisesLock: sync.Mutex{}},
}
l.promise.logger = l
return l
}

102
vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go generated vendored Normal file
View File

@@ -0,0 +1,102 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package log contains utilities for fetching a new logger
// when one is not already available.
//
// # The Log Handle
//
// This package contains a root logr.Logger Log. It may be used to
// get a handle to whatever the root logging implementation is. By
// default, no implementation exists, and the handle returns "promises"
// to loggers. When the implementation is set using SetLogger, these
// "promises" will be converted over to real loggers.
//
// # Logr
//
// All logging in controller-runtime is structured, using a set of interfaces
// defined by a package called logr
// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides
// helpers for setting up logr backed by Zap (go.uber.org/zap).
package log
import (
"context"
"sync"
"time"
"github.com/go-logr/logr"
)
// SetLogger sets a concrete logging implementation for all deferred Loggers.
func SetLogger(l logr.Logger) {
loggerWasSetLock.Lock()
defer loggerWasSetLock.Unlock()
loggerWasSet = true
dlog.Fulfill(l.GetSink())
}
// It is safe to assume that if this wasn't set within the first 30 seconds of a binary's
// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory
// allocations when not given an actual Logger, so we set a NullLogSink to avoid that.
//
// We need to keep the DelegatingLogSink because we have various inits() that get a logger from
// here. They will always get executed before any code that imports controller-runtime
// has a chance to run and hence to set an actual logger.
func init() {
// Init is blocking, so start a new goroutine
go func() {
time.Sleep(30 * time.Second)
loggerWasSetLock.Lock()
defer loggerWasSetLock.Unlock()
if !loggerWasSet {
dlog.Fulfill(NullLogSink{})
}
}()
}
var (
loggerWasSetLock sync.Mutex
loggerWasSet bool
)
// Log is the base logger used by kubebuilder. It delegates
// to another logr.Logger. You *must* call SetLogger to
// get any actual logging. If SetLogger is not called within
// the first 30 seconds of a binary's lifetime, it will get
// set to a NullLogSink.
var (
dlog = NewDelegatingLogSink(NullLogSink{})
Log = logr.New(dlog)
)
// FromContext returns a logger with predefined values from a context.Context.
func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger {
log := Log
if ctx != nil {
if logger, err := logr.FromContext(ctx); err == nil {
log = logger
}
}
return log.WithValues(keysAndValues...)
}
// IntoContext takes a context and sets the logger as one of its values.
// Use FromContext function to retrieve the logger.
func IntoContext(ctx context.Context, log logr.Logger) context.Context {
return logr.NewContext(ctx, log)
}
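A minimal usage sketch for the package above (assuming zapr/zap as the concrete implementation; any logr.Logger works):

package main

import (
	"context"

	"github.com/go-logr/zapr"
	"go.uber.org/zap"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

func main() {
	// Set the concrete implementation once, early in main; this fulfills
	// every logger handed out by log.Log before this point.
	zapLog, _ := zap.NewProduction()
	log.SetLogger(zapr.NewLogger(zapLog))

	// Attach a request-scoped logger to a context and retrieve it later.
	ctx := log.IntoContext(context.Background(), log.Log.WithName("reconciler"))
	logger := log.FromContext(ctx, "object", "default/example")
	logger.Info("reconciling")
}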

59
vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go generated vendored Normal file
View File

@@ -0,0 +1,59 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"github.com/go-logr/logr"
)
// NB: this is the same as the null logger from logr/testing,
// but avoids accidentally adding the testing flags to
// all binaries.
// NullLogSink is a logr.LogSink that does nothing.
type NullLogSink struct{}
var _ logr.LogSink = NullLogSink{}
// Init implements logr.LogSink.
func (log NullLogSink) Init(logr.RuntimeInfo) {
}
// Info implements logr.LogSink.
func (NullLogSink) Info(_ int, _ string, _ ...interface{}) {
// Do nothing.
}
// Enabled implements logr.LogSink.
func (NullLogSink) Enabled(level int) bool {
return false
}
// Error implements logr.LogSink.
func (NullLogSink) Error(_ error, _ string, _ ...interface{}) {
// Do nothing.
}
// WithName implements logr.LogSink.
func (log NullLogSink) WithName(_ string) logr.LogSink {
return log
}
// WithValues implements logr.LogSink.
func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink {
return log
}

View File

@@ -0,0 +1,76 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"sync"
"github.com/go-logr/logr"
)
// KubeAPIWarningLoggerOptions controls the behavior
// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger().
type KubeAPIWarningLoggerOptions struct {
// Deduplicate indicates a given warning message should only be written once.
// Setting this to true in a long-running process handling many warnings can
// result in increased memory use.
Deduplicate bool
}
// KubeAPIWarningLogger is a wrapper around
// a provided logr.Logger that implements the
// rest.WarningHandler interface.
type KubeAPIWarningLogger struct {
// logger is used to log responses with the warning header
logger logr.Logger
// opts contain options controlling warning output
opts KubeAPIWarningLoggerOptions
// writtenLock guards written
writtenLock sync.Mutex
// used to keep track of already logged messages
// and help in de-duplication.
written map[string]struct{}
}
// HandleWarningHeader handles logging for responses from API server that are
// warnings with code being 299 and uses a logr.Logger for its logging purposes.
func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) {
if code != 299 || len(message) == 0 {
return
}
if l.opts.Deduplicate {
l.writtenLock.Lock()
defer l.writtenLock.Unlock()
if _, alreadyLogged := l.written[message]; alreadyLogged {
return
}
l.written[message] = struct{}{}
}
l.logger.Info(message)
}
// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings
// with code = 299 to the provided logr.Logger.
func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger {
h := &KubeAPIWarningLogger{logger: l, opts: opts}
if opts.Deduplicate {
h.written = map[string]struct{}{}
}
return h
}
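A sketch of wiring the warning logger into a rest.Config (client-go's rest.Config exposes a WarningHandler field; ctrl.GetConfig is the usual controller-runtime helper):

package main

import (
	"k8s.io/client-go/rest"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

func newConfigWithWarningLogger() (*rest.Config, error) {
	cfg, err := ctrl.GetConfig()
	if err != nil {
		return nil, err
	}
	// Route HTTP 299 warning headers from the API server through logr,
	// de-duplicating repeated messages.
	cfg.WarningHandler = log.NewKubeAPIWarningLogger(
		log.Log.WithName("KubeAPIWarningLogger"),
		log.KubeAPIWarningLoggerOptions{Deduplicate: true},
	)
	return cfg, nil
}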

View File

@@ -0,0 +1,168 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package zap contains helpers for setting up a new logr.Logger instance
// using the Zap logging framework.
package zap
import (
"flag"
"fmt"
"strconv"
"strings"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var levelStrings = map[string]zapcore.Level{
"debug": zap.DebugLevel,
"info": zap.InfoLevel,
"error": zap.ErrorLevel,
}
var stackLevelStrings = map[string]zapcore.Level{
"info": zap.InfoLevel,
"error": zap.ErrorLevel,
"panic": zap.PanicLevel,
}
type encoderFlag struct {
setFunc func(NewEncoderFunc)
value string
}
var _ flag.Value = &encoderFlag{}
func (ev *encoderFlag) String() string {
return ev.value
}
func (ev *encoderFlag) Type() string {
return "encoder"
}
func (ev *encoderFlag) Set(flagValue string) error {
val := strings.ToLower(flagValue)
switch val {
case "json":
ev.setFunc(newJSONEncoder)
case "console":
ev.setFunc(newConsoleEncoder)
default:
return fmt.Errorf("invalid encoder value \"%s\"", flagValue)
}
ev.value = flagValue
return nil
}
type levelFlag struct {
setFunc func(zapcore.LevelEnabler)
value string
}
var _ flag.Value = &levelFlag{}
func (ev *levelFlag) Set(flagValue string) error {
level, validLevel := levelStrings[strings.ToLower(flagValue)]
if !validLevel {
logLevel, err := strconv.Atoi(flagValue)
if err != nil {
return fmt.Errorf("invalid log level \"%s\"", flagValue)
}
if logLevel > 0 {
intLevel := -1 * logLevel
ev.setFunc(zap.NewAtomicLevelAt(zapcore.Level(int8(intLevel))))
} else {
return fmt.Errorf("invalid log level \"%s\"", flagValue)
}
} else {
ev.setFunc(zap.NewAtomicLevelAt(level))
}
ev.value = flagValue
return nil
}
func (ev *levelFlag) String() string {
return ev.value
}
func (ev *levelFlag) Type() string {
return "level"
}
type stackTraceFlag struct {
setFunc func(zapcore.LevelEnabler)
value string
}
var _ flag.Value = &stackTraceFlag{}
func (ev *stackTraceFlag) Set(flagValue string) error {
level, validLevel := stackLevelStrings[strings.ToLower(flagValue)]
if !validLevel {
return fmt.Errorf("invalid stacktrace level \"%s\"", flagValue)
}
ev.setFunc(zap.NewAtomicLevelAt(level))
ev.value = flagValue
return nil
}
func (ev *stackTraceFlag) String() string {
return ev.value
}
func (ev *stackTraceFlag) Type() string {
return "level"
}
type timeEncodingFlag struct {
setFunc func(zapcore.TimeEncoder)
value string
}
var _ flag.Value = &timeEncodingFlag{}
func (ev *timeEncodingFlag) String() string {
return ev.value
}
func (ev *timeEncodingFlag) Type() string {
return "time-encoding"
}
func (ev *timeEncodingFlag) Set(flagValue string) error {
val := strings.ToLower(flagValue)
switch val {
case "rfc3339nano":
ev.setFunc(zapcore.RFC3339NanoTimeEncoder)
case "rfc3339":
ev.setFunc(zapcore.RFC3339TimeEncoder)
case "iso8601":
ev.setFunc(zapcore.ISO8601TimeEncoder)
case "millis":
ev.setFunc(zapcore.EpochMillisTimeEncoder)
case "nanos":
ev.setFunc(zapcore.EpochNanosTimeEncoder)
case "epoch":
ev.setFunc(zapcore.EpochTimeEncoder)
default:
return fmt.Errorf("invalid time-encoding value \"%s\"", flagValue)
}
ev.value = flagValue
return nil
}
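The flag types above are exposed through Options.BindFlags (defined in zap.go further below); a minimal sketch of binding them, where an integer passed to --zap-log-level=n is converted by levelFlag.Set into zapcore.Level(-n), i.e. logr verbosity V(n):

package main

import (
	"flag"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	opts := zap.Options{Development: true}
	opts.BindFlags(flag.CommandLine) // registers zap-devel, zap-encoder, zap-log-level, ...
	flag.Parse()

	// e.g. --zap-log-level=3 enables logger.V(3).Info(...) output.
	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
}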

View File

@@ -0,0 +1,133 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package zap
import (
"fmt"
"reflect"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
)
// KubeAwareEncoder is a Kubernetes-aware Zap Encoder.
// Instead of trying to force Kubernetes objects to implement
// ObjectMarshaller, we just implement a wrapper around a normal
// ObjectMarshaller that checks for Kubernetes objects.
type KubeAwareEncoder struct {
// Encoder is the zapcore.Encoder that this encoder delegates to
zapcore.Encoder
// Verbose controls whether or not the full object is printed.
// If false, only name, namespace, api version, and kind are printed.
// Otherwise, the full object is logged.
Verbose bool
}
// namespacedNameWrapper is a zapcore.ObjectMarshaler for Kubernetes NamespacedName.
type namespacedNameWrapper struct {
types.NamespacedName
}
func (w namespacedNameWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error {
if w.Namespace != "" {
enc.AddString("namespace", w.Namespace)
}
enc.AddString("name", w.Name)
return nil
}
// kubeObjectWrapper is a zapcore.ObjectMarshaler for Kubernetes objects.
type kubeObjectWrapper struct {
obj runtime.Object
}
// MarshalLogObject implements zapcore.ObjectMarshaler.
func (w kubeObjectWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error {
// TODO(directxman12): log kind and apiVersion if not set explicitly (common case)
// -- needs a scheme to convert to the GVK.
if reflect.ValueOf(w.obj).IsNil() {
return fmt.Errorf("got nil for runtime.Object")
}
if gvk := w.obj.GetObjectKind().GroupVersionKind(); gvk.Version != "" {
enc.AddString("apiVersion", gvk.GroupVersion().String())
enc.AddString("kind", gvk.Kind)
}
objMeta, err := meta.Accessor(w.obj)
if err != nil {
return fmt.Errorf("got runtime.Object without object metadata: %v", w.obj)
}
if ns := objMeta.GetNamespace(); ns != "" {
enc.AddString("namespace", ns)
}
enc.AddString("name", objMeta.GetName())
return nil
}
// NB(directxman12): can't just override AddReflected, since the encoder calls AddReflected on itself directly
// Clone implements zapcore.Encoder.
func (k *KubeAwareEncoder) Clone() zapcore.Encoder {
return &KubeAwareEncoder{
Encoder: k.Encoder.Clone(),
}
}
// EncodeEntry implements zapcore.Encoder.
func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
if k.Verbose {
// Kubernetes objects implement fmt.Stringer, so if we
// want verbose output, just delegate to that.
return k.Encoder.EncodeEntry(entry, fields)
}
for i, field := range fields {
// intercept stringer fields that happen to be Kubernetes runtime.Object or
// types.NamespacedName values (Kubernetes runtime.Objects commonly
// implement String, apparently).
// *unstructured.Unstructured does NOT implement the fmt.Stringer interface.
// We have to handle it specially.
if field.Type == zapcore.StringerType || field.Type == zapcore.ReflectType {
switch val := field.Interface.(type) {
case runtime.Object:
fields[i] = zapcore.Field{
Type: zapcore.ObjectMarshalerType,
Key: field.Key,
Interface: kubeObjectWrapper{obj: val},
}
case types.NamespacedName:
fields[i] = zapcore.Field{
Type: zapcore.ObjectMarshalerType,
Key: field.Key,
Interface: namespacedNameWrapper{NamespacedName: val},
}
}
}
}
return k.Encoder.EncodeEntry(entry, fields)
}
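A sketch of the encoder's effect when logging Kubernetes values (not part of the vendored file; it assumes the k8s.io/api core/v1 types are available):

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	logger := zap.New() // production defaults, so Verbose is false

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}}
	// With Verbose=false the KubeAwareEncoder rewrites these fields so only
	// namespace/name (plus apiVersion/kind when set) are emitted, instead of
	// the full object dump.
	logger.Info("reconciling", "pod", pod,
		"key", types.NamespacedName{Namespace: "default", Name: "example"})
}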

View File

@@ -0,0 +1,311 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package zap contains helpers for setting up a new logr.Logger instance
// using the Zap logging framework.
package zap
import (
"flag"
"io"
"os"
"time"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// EncoderConfigOption is a function that can modify a `zapcore.EncoderConfig`.
type EncoderConfigOption func(*zapcore.EncoderConfig)
// NewEncoderFunc is a function that creates an Encoder using the provided EncoderConfigOptions.
type NewEncoderFunc func(...EncoderConfigOption) zapcore.Encoder
// New returns a brand new Logger configured with Opts. It
// uses KubeAwareEncoder which adds Type information and
// Namespace/Name to the log.
func New(opts ...Opts) logr.Logger {
return zapr.NewLogger(NewRaw(opts...))
}
// Opts allows manipulating Options.
type Opts func(*Options)
// UseDevMode sets the logger to use (or not use) development mode (more
// human-readable output, extra stack traces and logging information, etc).
// See Options.Development.
func UseDevMode(enabled bool) Opts {
return func(o *Options) {
o.Development = enabled
}
}
// WriteTo configures the logger to write to the given io.Writer, instead of standard error.
// See Options.DestWriter.
func WriteTo(out io.Writer) Opts {
return func(o *Options) {
o.DestWriter = out
}
}
// Encoder configures how the logger will encode the output, e.g. JSON or console.
// See Options.Encoder.
func Encoder(encoder zapcore.Encoder) func(o *Options) {
return func(o *Options) {
o.Encoder = encoder
}
}
// JSONEncoder configures the logger to use a JSON Encoder.
func JSONEncoder(opts ...EncoderConfigOption) func(o *Options) {
return func(o *Options) {
o.Encoder = newJSONEncoder(opts...)
}
}
func newJSONEncoder(opts ...EncoderConfigOption) zapcore.Encoder {
encoderConfig := zap.NewProductionEncoderConfig()
for _, opt := range opts {
opt(&encoderConfig)
}
return zapcore.NewJSONEncoder(encoderConfig)
}
// ConsoleEncoder configures the logger to use a Console encoder.
func ConsoleEncoder(opts ...EncoderConfigOption) func(o *Options) {
return func(o *Options) {
o.Encoder = newConsoleEncoder(opts...)
}
}
func newConsoleEncoder(opts ...EncoderConfigOption) zapcore.Encoder {
encoderConfig := zap.NewDevelopmentEncoderConfig()
for _, opt := range opts {
opt(&encoderConfig)
}
return zapcore.NewConsoleEncoder(encoderConfig)
}
// Level sets Options.Level, which configures the minimum enabled logging level, e.g. Debug or Info.
// A zap log level should be multiplied by -1 to get the logr verbosity.
// For example, to get logr verbosity of 3, pass zapcore.Level(-3) to this Opts.
// See https://pkg.go.dev/github.com/go-logr/zapr for how zap level relates to logr verbosity.
func Level(level zapcore.LevelEnabler) func(o *Options) {
return func(o *Options) {
o.Level = level
}
}
// StacktraceLevel sets Options.StacktraceLevel, which configures the logger to record a stack trace
// for all messages at or above a given level.
// See the Level Opts for the relationship of zap log level to logr verbosity.
func StacktraceLevel(stacktraceLevel zapcore.LevelEnabler) func(o *Options) {
return func(o *Options) {
o.StacktraceLevel = stacktraceLevel
}
}
// RawZapOpts allows appending arbitrary zap.Options to configure the underlying zap logger.
// See Options.ZapOpts.
func RawZapOpts(zapOpts ...zap.Option) func(o *Options) {
return func(o *Options) {
o.ZapOpts = append(o.ZapOpts, zapOpts...)
}
}
// Options contains all possible settings.
type Options struct {
// Development configures the logger to use a Zap development config
// (stacktraces on warnings, no sampling), otherwise a Zap production
// config will be used (stacktraces on errors, sampling).
Development bool
// Encoder configures how Zap will encode the output. Defaults to
// console when Development is true and JSON otherwise
Encoder zapcore.Encoder
// EncoderConfigOptions can modify the EncoderConfig needed to initialize an Encoder.
// See https://pkg.go.dev/go.uber.org/zap/zapcore#EncoderConfig for the list of options
// that can be configured.
// Note that the EncoderConfigOptions are not applied when the Encoder option is already set.
EncoderConfigOptions []EncoderConfigOption
// NewEncoder configures Encoder using the provided EncoderConfigOptions.
// Note that the NewEncoder function is not used when the Encoder option is already set.
NewEncoder NewEncoderFunc
// DestWriter controls the destination of the log output. Defaults to
// os.Stderr.
DestWriter io.Writer
// DestWritter controls the destination of the log output. Defaults to
// os.Stderr.
//
// Deprecated: Use DestWriter instead
DestWritter io.Writer
// Level configures the verbosity of the logging.
// Defaults to Debug when Development is true and Info otherwise.
// A zap log level should be multiplied by -1 to get the logr verbosity.
// For example, to get logr verbosity of 3, set this field to zapcore.Level(-3).
// See https://pkg.go.dev/github.com/go-logr/zapr for how zap level relates to logr verbosity.
Level zapcore.LevelEnabler
// StacktraceLevel is the level at and above which stacktraces will
// be recorded for all messages. Defaults to Warn when Development
// is true and Error otherwise.
// See Level for the relationship of zap log level to logr verbosity.
StacktraceLevel zapcore.LevelEnabler
// ZapOpts allows passing arbitrary zap.Options to configure on the
// underlying Zap logger.
ZapOpts []zap.Option
// TimeEncoder specifies the encoder for the timestamps in log messages.
// Defaults to EpochTimeEncoder as this is the default in Zap currently.
TimeEncoder zapcore.TimeEncoder
}
// addDefaults adds defaults to the Options.
func (o *Options) addDefaults() {
if o.DestWriter == nil && o.DestWritter == nil {
o.DestWriter = os.Stderr
} else if o.DestWriter == nil && o.DestWritter != nil {
// fall back to the misspelled DestWritter while it is deprecated but not yet removed
o.DestWriter = o.DestWritter
}
if o.Development {
if o.NewEncoder == nil {
o.NewEncoder = newConsoleEncoder
}
if o.Level == nil {
lvl := zap.NewAtomicLevelAt(zap.DebugLevel)
o.Level = &lvl
}
if o.StacktraceLevel == nil {
lvl := zap.NewAtomicLevelAt(zap.WarnLevel)
o.StacktraceLevel = &lvl
}
o.ZapOpts = append(o.ZapOpts, zap.Development())
} else {
if o.NewEncoder == nil {
o.NewEncoder = newJSONEncoder
}
if o.Level == nil {
lvl := zap.NewAtomicLevelAt(zap.InfoLevel)
o.Level = &lvl
}
if o.StacktraceLevel == nil {
lvl := zap.NewAtomicLevelAt(zap.ErrorLevel)
o.StacktraceLevel = &lvl
}
// Disable sampling for increased Debug levels. Otherwise, this will
// cause index out of bounds errors in the sampling code.
if !o.Level.Enabled(zapcore.Level(-2)) {
o.ZapOpts = append(o.ZapOpts,
zap.WrapCore(func(core zapcore.Core) zapcore.Core {
return zapcore.NewSamplerWithOptions(core, time.Second, 100, 100)
}))
}
}
if o.TimeEncoder == nil {
o.TimeEncoder = zapcore.EpochTimeEncoder
}
f := func(ecfg *zapcore.EncoderConfig) {
ecfg.EncodeTime = o.TimeEncoder
}
// prepend instead of append it in case someone adds a time encoder option in it
o.EncoderConfigOptions = append([]EncoderConfigOption{f}, o.EncoderConfigOptions...)
if o.Encoder == nil {
o.Encoder = o.NewEncoder(o.EncoderConfigOptions...)
}
o.ZapOpts = append(o.ZapOpts, zap.AddStacktrace(o.StacktraceLevel))
}
// NewRaw returns a new zap.Logger configured with the passed Opts
// or their defaults. It uses KubeAwareEncoder which adds Type
// information and Namespace/Name to the log.
func NewRaw(opts ...Opts) *zap.Logger {
o := &Options{}
for _, opt := range opts {
opt(o)
}
o.addDefaults()
// this basically mimics New<type>Config, but with a custom sink
sink := zapcore.AddSync(o.DestWriter)
o.ZapOpts = append(o.ZapOpts, zap.ErrorOutput(sink))
log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: o.Encoder, Verbose: o.Development}, sink, o.Level))
log = log.WithOptions(o.ZapOpts...)
return log
}
// BindFlags will parse the given flagset for zap option flags and set the log options accordingly:
// - zap-devel:
// Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn)
// Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)
// - zap-encoder: Zap log encoding (one of 'json' or 'console')
// - zap-log-level: Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error',
// or any integer value > 0 which corresponds to custom debug levels of increasing verbosity.
// - zap-stacktrace-level: Zap Level at and above which stacktraces are captured (one of 'info', 'error' or 'panic')
// - zap-time-encoding: Zap time encoding (one of 'epoch', 'millis', 'nanos', 'iso8601', 'rfc3339' or 'rfc3339nano'),
// Defaults to 'epoch'.
func (o *Options) BindFlags(fs *flag.FlagSet) {
// Set Development mode value
fs.BoolVar(&o.Development, "zap-devel", o.Development,
"Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). "+
"Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)")
// Set Encoder value
var encVal encoderFlag
encVal.setFunc = func(fromFlag NewEncoderFunc) {
o.NewEncoder = fromFlag
}
fs.Var(&encVal, "zap-encoder", "Zap log encoding (one of 'json' or 'console')")
// Set the Log Level
var levelVal levelFlag
levelVal.setFunc = func(fromFlag zapcore.LevelEnabler) {
o.Level = fromFlag
}
fs.Var(&levelVal, "zap-log-level",
"Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', "+
"or any integer value > 0 which corresponds to custom debug levels of increasing verbosity")
// Set the StackTrace Level
var stackVal stackTraceFlag
stackVal.setFunc = func(fromFlag zapcore.LevelEnabler) {
o.StacktraceLevel = fromFlag
}
fs.Var(&stackVal, "zap-stacktrace-level",
"Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').")
// Set the time encoding
var timeEncoderVal timeEncodingFlag
timeEncoderVal.setFunc = func(fromFlag zapcore.TimeEncoder) {
o.TimeEncoder = fromFlag
}
fs.Var(&timeEncoderVal, "zap-time-encoding", "Zap time encoding (one of 'epoch', 'millis', 'nanos', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.")
}
// UseFlagOptions configures the logger to use the Options set by parsing zap option flags from the CLI.
//
// opts := zap.Options{}
// opts.BindFlags(flag.CommandLine)
// flag.Parse()
// log := zap.New(zap.UseFlagOptions(&opts))
func UseFlagOptions(in *Options) Opts {
return func(o *Options) {
*o = *in
}
}
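For completeness, a sketch of configuring the logger programmatically with the functional options defined above (values are illustrative):

package main

import (
	"os"

	"go.uber.org/zap/zapcore"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Development mode defaults to a console encoder and Debug level;
	// the explicit options below override the destination and stacktrace level.
	logger := zap.New(
		zap.UseDevMode(true),
		zap.WriteTo(os.Stdout),
		zap.StacktraceLevel(zapcore.ErrorLevel),
	)
	ctrl.SetLogger(logger)
}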

View File

@@ -0,0 +1,21 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package manager is required to create Controllers and provides shared dependencies such as clients, caches, schemes,
etc. Controllers must be started by calling Manager.Start.
*/
package manager
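A minimal sketch of creating and starting a Manager (assuming the usual top-level controller-runtime aliases such as ctrl.NewManager and ctrl.SetupSignalHandler; the bind addresses are illustrative):

package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		MetricsBindAddress:     ":8080",
		HealthProbeBindAddress: ":8081",
	})
	if err != nil {
		os.Exit(1)
	}
	_ = mgr.AddHealthzCheck("ping", healthz.Ping)
	_ = mgr.AddReadyzCheck("ping", healthz.Ping)

	// Start blocks until the signal-handler context is cancelled.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}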

View File

@@ -0,0 +1,652 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manager
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"sync"
"sync/atomic"
"time"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/cluster"
"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/internal/httpserver"
intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
const (
// Values taken from: https://github.com/kubernetes/component-base/blob/master/config/v1alpha1/defaults.go
defaultLeaseDuration = 15 * time.Second
defaultRenewDeadline = 10 * time.Second
defaultRetryPeriod = 2 * time.Second
defaultGracefulShutdownPeriod = 30 * time.Second
defaultReadinessEndpoint = "/readyz"
defaultLivenessEndpoint = "/healthz"
defaultMetricsEndpoint = "/metrics"
)
var _ Runnable = &controllerManager{}
type controllerManager struct {
sync.Mutex
started bool
stopProcedureEngaged *int64
errChan chan error
runnables *runnables
// cluster holds a variety of methods to interact with a cluster. Required.
cluster cluster.Cluster
// recorderProvider is used to generate event recorders that will be injected into Controllers
// (and EventHandlers, Sources and Predicates).
recorderProvider *intrec.Provider
// resourceLock forms the basis for leader election
resourceLock resourcelock.Interface
// leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease
// on shutdown
leaderElectionReleaseOnCancel bool
// metricsListener is used to serve prometheus metrics
metricsListener net.Listener
// metricsExtraHandlers contains extra handlers to register on http server that serves metrics.
metricsExtraHandlers map[string]http.Handler
// healthProbeListener is used to serve liveness probe
healthProbeListener net.Listener
// Readiness probe endpoint name
readinessEndpointName string
// Liveness probe endpoint name
livenessEndpointName string
// Readyz probe handler
readyzHandler *healthz.Handler
// Healthz probe handler
healthzHandler *healthz.Handler
// controllerOptions are the global controller options.
controllerOptions v1alpha1.ControllerConfigurationSpec
// Logger is the logger that should be used by this manager.
// If none is set, it defaults to log.Log global logger.
logger logr.Logger
// leaderElectionStopped is an internal channel used to signal the stopping procedure that the
// LeaderElection.Run(...) function has returned and the shutdown can proceed.
leaderElectionStopped chan struct{}
// leaderElectionCancel is used to cancel the leader election. It is distinct from internalStopper,
// because for safety reasons we need to os.Exit() when we lose the leader election, meaning that
// it must be deferred until after gracefulShutdown is done.
leaderElectionCancel context.CancelFunc
// elected is closed when this manager becomes the leader of a group of
// managers, either because it won a leader election or because no leader
// election was configured.
elected chan struct{}
// port is the port that the webhook server serves at.
port int
// host is the hostname that the webhook server binds to.
host string
// CertDir is the directory that contains the server key and certificate.
// If not set, the webhook server looks up the server key and certificate in
// {TempDir}/k8s-webhook-server/serving-certs.
certDir string
webhookServer *webhook.Server
// webhookServerOnce will be called in GetWebhookServer() to optionally initialize
// webhookServer if unset, and Add() it to controllerManager.
webhookServerOnce sync.Once
// leaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership.
leaseDuration time.Duration
// renewDeadline is the duration that the acting controlplane will retry
// refreshing leadership before giving up.
renewDeadline time.Duration
// retryPeriod is the duration the LeaderElector clients should wait
// between tries of actions.
retryPeriod time.Duration
// gracefulShutdownTimeout is the duration given to runnable to stop
// before the manager actually returns on stop.
gracefulShutdownTimeout time.Duration
// onStoppedLeading is called when the leader election lease is lost.
// It can be overridden for tests.
onStoppedLeading func()
// shutdownCtx is the context that can be used during shutdown. It will be cancelled
// after the gracefulShutdownTimeout ended. It must not be accessed before internalStop
// is closed because it will be nil.
shutdownCtx context.Context
internalCtx context.Context
internalCancel context.CancelFunc
// internalProceduresStop channel is used internally to the manager when coordinating
// the proper shutdown of servers. This channel is also used for dependency injection.
internalProceduresStop chan struct{}
}
type hasCache interface {
Runnable
GetCache() cache.Cache
}
// Add sets dependencies on i, and adds it to the list of Runnables to start.
func (cm *controllerManager) Add(r Runnable) error {
cm.Lock()
defer cm.Unlock()
return cm.add(r)
}
func (cm *controllerManager) add(r Runnable) error {
// Set dependencies on the object
if err := cm.SetFields(r); err != nil {
return err
}
return cm.runnables.Add(r)
}
// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10.
func (cm *controllerManager) SetFields(i interface{}) error {
if err := cm.cluster.SetFields(i); err != nil {
return err
}
if _, err := inject.InjectorInto(cm.SetFields, i); err != nil {
return err
}
if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil {
return err
}
if _, err := inject.LoggerInto(cm.logger, i); err != nil {
return err
}
return nil
}
// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error {
cm.Lock()
defer cm.Unlock()
if cm.started {
return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created")
}
if path == defaultMetricsEndpoint {
return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint)
}
if _, found := cm.metricsExtraHandlers[path]; found {
return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path)
}
cm.metricsExtraHandlers[path] = handler
cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path)
return nil
}
// AddHealthzCheck allows you to add Healthz checker.
func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error {
cm.Lock()
defer cm.Unlock()
if cm.started {
return fmt.Errorf("unable to add new checker because healthz endpoint has already been created")
}
if cm.healthzHandler == nil {
cm.healthzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}}
}
cm.healthzHandler.Checks[name] = check
return nil
}
// AddReadyzCheck allows you to add Readyz checker.
func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error {
cm.Lock()
defer cm.Unlock()
if cm.started {
return fmt.Errorf("unable to add new checker because healthz endpoint has already been created")
}
if cm.readyzHandler == nil {
cm.readyzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}}
}
cm.readyzHandler.Checks[name] = check
return nil
}
func (cm *controllerManager) GetConfig() *rest.Config {
return cm.cluster.GetConfig()
}
func (cm *controllerManager) GetClient() client.Client {
return cm.cluster.GetClient()
}
func (cm *controllerManager) GetScheme() *runtime.Scheme {
return cm.cluster.GetScheme()
}
func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer {
return cm.cluster.GetFieldIndexer()
}
func (cm *controllerManager) GetCache() cache.Cache {
return cm.cluster.GetCache()
}
func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder {
return cm.cluster.GetEventRecorderFor(name)
}
func (cm *controllerManager) GetRESTMapper() meta.RESTMapper {
return cm.cluster.GetRESTMapper()
}
func (cm *controllerManager) GetAPIReader() client.Reader {
return cm.cluster.GetAPIReader()
}
func (cm *controllerManager) GetWebhookServer() *webhook.Server {
cm.webhookServerOnce.Do(func() {
if cm.webhookServer == nil {
cm.webhookServer = &webhook.Server{
Port: cm.port,
Host: cm.host,
CertDir: cm.certDir,
}
}
if err := cm.Add(cm.webhookServer); err != nil {
panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err))
}
})
return cm.webhookServer
}
func (cm *controllerManager) GetLogger() logr.Logger {
return cm.logger
}
func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec {
return cm.controllerOptions
}
func (cm *controllerManager) serveMetrics() {
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{
ErrorHandling: promhttp.HTTPErrorOnError,
})
// TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics
mux := http.NewServeMux()
mux.Handle(defaultMetricsEndpoint, handler)
for path, extraHandler := range cm.metricsExtraHandlers {
mux.Handle(path, extraHandler)
}
server := httpserver.New(mux)
go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener)
}
func (cm *controllerManager) serveHealthProbes() {
mux := http.NewServeMux()
server := httpserver.New(mux)
if cm.readyzHandler != nil {
mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
// Append '/' suffix to handle subpaths
mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
}
if cm.healthzHandler != nil {
mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
// Append '/' suffix to handle subpaths
mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
}
go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener)
}
func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) {
log = log.WithValues("kind", kind, "addr", ln.Addr())
go func() {
log.Info("Starting server")
if err := server.Serve(ln); err != nil {
if errors.Is(err, http.ErrServerClosed) {
return
}
if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 {
// There might be cases where connections are still open and we try to shutdown
// but not having enough time to close the connection causes an error in Serve
//
// In that case we want to avoid returning an error to the main error channel.
log.Error(err, "error on Serve after stop has been engaged")
return
}
cm.errChan <- err
}
}()
// Shutdown the server when stop is closed.
<-cm.internalProceduresStop
if err := server.Shutdown(cm.shutdownCtx); err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
// Avoid logging context related errors.
return
}
if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 {
cm.logger.Error(err, "error on Shutdown after stop has been engaged")
return
}
cm.errChan <- err
}
}
// Start starts the manager and waits indefinitely.
// There are only two ways to have Start return:
// An error has occurred in one of the internal operations,
// such as leader election, cache start, webhooks, and so on.
// Or, the context is cancelled.
func (cm *controllerManager) Start(ctx context.Context) (err error) {
cm.Lock()
if cm.started {
cm.Unlock()
return errors.New("manager already started")
}
var ready bool
defer func() {
// Only unlock the manager if we haven't reached
// the internal readiness condition.
if !ready {
cm.Unlock()
}
}()
// Initialize the internal context.
cm.internalCtx, cm.internalCancel = context.WithCancel(ctx)
// This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request
stopComplete := make(chan struct{})
defer close(stopComplete)
// This must be deferred after closing stopComplete, otherwise we deadlock.
defer func() {
// https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg
stopErr := cm.engageStopProcedure(stopComplete)
if stopErr != nil {
if err != nil {
// kerrors.NewAggregate allows using errors.Is for all contained errors,
// whereas fmt.Errorf can wrap at most one error, which means the
// other one cannot be found anymore.
err = kerrors.NewAggregate([]error{err, stopErr})
} else {
err = stopErr
}
}
}()
// Add the cluster runnable.
if err := cm.add(cm.cluster); err != nil {
return fmt.Errorf("failed to add cluster to runnables: %w", err)
}
// Metrics should be served whether the controller is leader or not.
// (If we don't serve metrics for non-leaders, prometheus will still scrape
// the pod but will get a connection refused).
if cm.metricsListener != nil {
cm.serveMetrics()
}
// Serve health probes.
if cm.healthProbeListener != nil {
cm.serveHealthProbes()
}
// First start any webhook servers, which includes conversion, validation, and defaulting
// webhooks that are registered.
//
// WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition
// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
// to never start because no cache can be populated.
if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
// Start and wait for caches.
if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
// Start the non-leaderelection Runnables after the cache has synced.
if err := cm.runnables.Others.Start(cm.internalCtx); err != nil {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
// Start the leader election and all required runnables.
{
ctx, cancel := context.WithCancel(context.Background())
cm.leaderElectionCancel = cancel
go func() {
if cm.resourceLock != nil {
if err := cm.startLeaderElection(ctx); err != nil {
cm.errChan <- err
}
} else {
// Treat not having leader election enabled the same as being elected.
if err := cm.startLeaderElectionRunnables(); err != nil {
cm.errChan <- err
}
close(cm.elected)
}
}()
}
ready = true
cm.Unlock()
select {
case <-ctx.Done():
// We are done
return nil
case err := <-cm.errChan:
// Error starting or running a runnable
return err
}
}
// engageStopProcedure signals all runnables to stop, reads potential errors
// from the errChan and waits for them to end. It must not be called more than once.
func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error {
if !atomic.CompareAndSwapInt64(cm.stopProcedureEngaged, 0, 1) {
return errors.New("stop procedure already engaged")
}
// Populate the shutdown context, this operation MUST be done before
// closing the internalProceduresStop channel.
//
// The shutdown context immediately expires if the gracefulShutdownTimeout is not set.
var shutdownCancel context.CancelFunc
cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
defer shutdownCancel()
// Start draining the errors before acquiring the lock to make sure we don't deadlock
// if something that has the lock is blocked on trying to write into the unbuffered
// channel after something else already wrote into it.
var closeOnce sync.Once
go func() {
for {
// Closing in the for loop is required to avoid race conditions between
// the closure of all internal procedures and making sure to have a reader off the error channel.
closeOnce.Do(func() {
// Cancel the internal stop channel and wait for the procedures to stop and complete.
close(cm.internalProceduresStop)
cm.internalCancel()
})
select {
case err, ok := <-cm.errChan:
if ok {
cm.logger.Error(err, "error received after stop sequence was engaged")
}
case <-stopComplete:
return
}
}
}()
// We want to close this after the other runnables stop, because we don't
// want things like leader election to try and emit events on a closed
// channel
defer cm.recorderProvider.Stop(cm.shutdownCtx)
defer func() {
// Cancel leader election only after we waited. It will os.Exit() the app for safety.
if cm.resourceLock != nil {
// After asking the context to be cancelled, make sure
// we wait for the leader stopped channel to be closed, otherwise
// we might encounter race conditions between this code
// and the event recorder, which is used within leader election code.
cm.leaderElectionCancel()
<-cm.leaderElectionStopped
}
}()
go func() {
// First stop the non-leader election runnables.
cm.logger.Info("Stopping and waiting for non leader election runnables")
cm.runnables.Others.StopAndWait(cm.shutdownCtx)
// Stop all the leader election runnables, which includes reconcilers.
cm.logger.Info("Stopping and waiting for leader election runnables")
cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx)
// Stop the caches before the leader election runnables, this is an important
// step to make sure that we don't race with the reconcilers by receiving more events
// from the API servers and enqueueing them.
cm.logger.Info("Stopping and waiting for caches")
cm.runnables.Caches.StopAndWait(cm.shutdownCtx)
// Webhooks should come last, as they might be still serving some requests.
cm.logger.Info("Stopping and waiting for webhooks")
cm.runnables.Webhooks.StopAndWait(cm.shutdownCtx)
// Proceed to close the manager and overall shutdown context.
cm.logger.Info("Wait completed, proceeding to shutdown the manager")
shutdownCancel()
}()
<-cm.shutdownCtx.Done()
if err := cm.shutdownCtx.Err(); err != nil && !errors.Is(err, context.Canceled) {
if errors.Is(err, context.DeadlineExceeded) {
if cm.gracefulShutdownTimeout > 0 {
return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err)
}
return nil
}
// For any other error, return the error.
return err
}
return nil
}
func (cm *controllerManager) startLeaderElectionRunnables() error {
return cm.runnables.LeaderElection.Start(cm.internalCtx)
}
func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error) {
l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
Lock: cm.resourceLock,
LeaseDuration: cm.leaseDuration,
RenewDeadline: cm.renewDeadline,
RetryPeriod: cm.retryPeriod,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
if err := cm.startLeaderElectionRunnables(); err != nil {
cm.errChan <- err
return
}
close(cm.elected)
},
OnStoppedLeading: func() {
if cm.onStoppedLeading != nil {
cm.onStoppedLeading()
}
// Make sure graceful shutdown is skipped if we lost the leader lock without
// intending to.
cm.gracefulShutdownTimeout = time.Duration(0)
// Most implementations of leader election log.Fatal() here.
// Since Start is wrapped in log.Fatal when called, we can just return
// an error here which will cause the program to exit.
cm.errChan <- errors.New("leader election lost")
},
},
ReleaseOnCancel: cm.leaderElectionReleaseOnCancel,
})
if err != nil {
return err
}
// Start the leader elector process
go func() {
l.Run(ctx)
<-ctx.Done()
close(cm.leaderElectionStopped)
}()
return nil
}
func (cm *controllerManager) Elected() <-chan struct{} {
return cm.elected
}
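A sketch of the leader-election knobs that feed the controllerManager above, set via manager.Options (lock name and namespace are hypothetical; the durations mirror the defaults listed earlier):

package main

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	leaseDuration := 15 * time.Second
	renewDeadline := 10 * time.Second
	retryPeriod := 2 * time.Second

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{
		LeaderElection:                true,
		LeaderElectionID:              "example-controller-lock", // hypothetical
		LeaderElectionNamespace:       "kube-system",             // illustrative
		LeaderElectionReleaseOnCancel: true, // only safe if the binary exits right after Start returns
		LeaseDuration:                 &leaseDuration,
		RenewDeadline:                 &renewDeadline,
		RetryPeriod:                   &retryPeriod,
	})
	if err != nil {
		panic(err)
	}
	// On context cancellation the manager stops the non-leader-election
	// runnables, then the leader-election runnables, then caches and webhooks,
	// and finally releases the lease because ReleaseOnCancel is set.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}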

View File

@@ -0,0 +1,638 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manager
import (
"context"
"fmt"
"net"
"net/http"
"reflect"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/cluster"
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/healthz"
intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
"sigs.k8s.io/controller-runtime/pkg/leaderelection"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/recorder"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables.
// A Manager is required to create Controllers.
type Manager interface {
// Cluster holds a variety of methods to interact with a cluster.
cluster.Cluster
// Add will set requested dependencies on the component, and cause the component to be
// started when Start is called. Add will inject any dependencies for which the argument
// implements the inject interface - e.g. inject.Client.
// Depending on whether a Runnable implements the LeaderElectionRunnable interface, it can be run in either
// non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled).
Add(Runnable) error
// Elected is closed when this manager is elected leader of a group of
// managers, either because it won a leader election or because no leader
// election was configured.
Elected() <-chan struct{}
// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
// Might be useful to register some diagnostic endpoints, e.g. pprof. Note that these endpoints are meant to be
// sensitive and shouldn't be exposed publicly.
// If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as
// Runnable to the manager via Add method.
AddMetricsExtraHandler(path string, handler http.Handler) error
// AddHealthzCheck allows you to add Healthz checker
AddHealthzCheck(name string, check healthz.Checker) error
// AddReadyzCheck allows you to add Readyz checker
AddReadyzCheck(name string, check healthz.Checker) error
// Start starts all registered Controllers and blocks until the context is cancelled.
// Returns an error if there is an error starting any controller.
//
// If LeaderElection is used, the binary must be exited immediately after this returns,
// otherwise components that need leader election might continue to run after the leader
// lock was lost.
Start(ctx context.Context) error
// GetWebhookServer returns a webhook.Server
GetWebhookServer() *webhook.Server
// GetLogger returns this manager's logger.
GetLogger() logr.Logger
// GetControllerOptions returns controller global configuration options.
GetControllerOptions() v1alpha1.ControllerConfigurationSpec
}
// Options are the arguments for creating a new Manager.
type Options struct {
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources.
// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better
// to pass your own scheme in. See the documentation in pkg/scheme for more information.
Scheme *runtime.Scheme
// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs
MapperProvider func(c *rest.Config) (meta.RESTMapper, error)
// SyncPeriod determines the minimum frequency at which watched resources are
// reconciled. A lower period will correct entropy more quickly, but reduce
// responsiveness to change if there are many watched resources. Change this
// value only if you know what you are doing. Defaults to 10 hours if unset.
// There will be a 10 percent jitter between the SyncPeriod of all controllers
// so that all controllers will not send list requests simultaneously.
//
// This applies to all controllers.
//
// A periodic sync happens for two reasons:
// 1. To insure against a bug in the controller that causes an object to not
// be requeued, when it otherwise should be requeued.
// 2. To insure against an unknown bug in controller-runtime, or its dependencies,
// that causes an object to not be requeued, when it otherwise should be
// requeued, or to be removed from the queue, when it otherwise should not
// be removed.
//
// If you want
// 1. to insure against missed watch events, or
// 2. to poll services that cannot be watched,
// then we recommend that, instead of changing the default period, the
// controller requeue, with a constant duration `t`, whenever the controller
// is "done" with an object, and would otherwise not requeue it, i.e., we
// recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
// instead of `reconcile.Result{}`.
SyncPeriod *time.Duration
// Logger is the logger that should be used by this manager.
// If none is set, it defaults to log.Log global logger.
Logger logr.Logger
// LeaderElection determines whether or not to use leader election when
// starting the manager.
LeaderElection bool
// LeaderElectionResourceLock determines which resource lock to use for leader election,
// defaults to "leases". Change this value only if you know what you are doing.
//
// If you are using `configmaps`/`endpoints` resource lock and want to migrate to "leases",
// you might do so by migrating to the respective multilock first ("configmapsleases" or "endpointsleases"),
// which will acquire a leader lock on both resources.
// After all your users have migrated to the multilock, you can go ahead and migrate to "leases".
// Please also keep in mind, that users might skip versions of your controller.
//
// Note: before controller-runtime version v0.7, it was set to "configmaps".
// And from v0.7 to v0.11, the default was "configmapsleases", which was
// used to migrate from configmaps to leases.
// Since the default was "configmapsleases" for over a year, spanning five minor releases,
// any actively maintained operators are very likely to have a released version that uses
// "configmapsleases". Therefore defaulting to "leases" should be safe since v0.12.
//
// So, what do you have to do when you are updating your controller-runtime dependency
// from a lower version to v0.12 or newer?
// - If your operator matches at least one of these conditions:
// - the LeaderElectionResourceLock in your operator has already been explicitly set to "leases"
// - the old controller-runtime version is between v0.7.0 and v0.11.x and the
// LeaderElectionResourceLock wasn't set or was set to "leases"/"configmapsleases"/"endpointsleases"
// feel free to update controller-runtime to v0.12 or newer.
// - Otherwise, you may have to take these steps:
// 1. update controller-runtime to v0.12 or newer in your go.mod
// 2. set LeaderElectionResourceLock to "configmapsleases" (or "endpointsleases")
// 3. package your operator and upgrade it in all your clusters
// 4. only after you have finished step 3 can you remove the LeaderElectionResourceLock and fall back to the default "leases"
// If these steps are skipped, your operator might end up with multiple running instances that
// each acquired leadership through a different resource lock during the upgrade and thus
// act on the same resources concurrently.
LeaderElectionResourceLock string
// LeaderElectionNamespace determines the namespace in which the leader
// election resource will be created.
LeaderElectionNamespace string
// LeaderElectionID determines the name of the resource that leader election
// will use for holding the leader lock.
LeaderElectionID string
// LeaderElectionConfig can be specified to override the default configuration
// that is used to build the leader election client.
LeaderElectionConfig *rest.Config
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to exit immediately when the
// Manager is stopped; otherwise this setting is unsafe. Setting this significantly
// speeds up voluntary leader transitions, as the new leader doesn't have to wait
// for the LeaseDuration to elapse first.
LeaderElectionReleaseOnCancel bool
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack. Default is 15 seconds.
LeaseDuration *time.Duration
// RenewDeadline is the duration that the acting controlplane will retry
// refreshing leadership before giving up. Default is 10 seconds.
RenewDeadline *time.Duration
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions. Default is 2 seconds.
RetryPeriod *time.Duration
// Namespace, if specified, restricts the manager's cache to watch objects in
// the desired namespace. Defaults to all namespaces.
//
// Note: If a namespace is specified, controllers can still Watch for a
// cluster-scoped resource (e.g. Node). For namespaced resources, the cache
// will only hold objects from the desired namespace.
Namespace string
// MetricsBindAddress is the TCP address that the controller should bind to
// for serving prometheus metrics.
// It can be set to "0" to disable the metrics serving.
MetricsBindAddress string
// HealthProbeBindAddress is the TCP address that the controller should bind to
// for serving health probes.
HealthProbeBindAddress string
// Readiness probe endpoint name, defaults to "readyz"
ReadinessEndpointName string
// Liveness probe endpoint name, defaults to "healthz"
LivenessEndpointName string
// Port is the port that the webhook server serves at.
// It is used to set webhook.Server.Port if WebhookServer is not set.
Port int
// Host is the hostname that the webhook server binds to.
// It is used to set webhook.Server.Host if WebhookServer is not set.
Host string
// CertDir is the directory that contains the server key and certificate.
// If not set, the webhook server will look up the server key and certificate in
// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
// must be named tls.key and tls.crt, respectively.
// It is used to set webhook.Server.CertDir if WebhookServer is not set.
CertDir string
// WebhookServer is an externally configured webhook.Server. By default,
// a Manager will create a default server using Port, Host, and CertDir;
// if this is set, the Manager will use this server instead.
WebhookServer *webhook.Server
// Functions that allow a user to customize the values that will be injected.
// NewCache is the function that will create the cache to be used
// by the manager. If not set this will use the default new cache function.
NewCache cache.NewCacheFunc
// NewClient is the func that creates the client to be used by the manager.
// If not set this will create the default DelegatingClient that will
// use the cache for reads and the client for writes.
NewClient cluster.NewClientFunc
// BaseContext is the function that provides Context values to Runnables
// managed by the Manager. If a BaseContext function isn't provided, Runnables
// will receive a new Background Context instead.
BaseContext BaseContextFunc
// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
// for the given objects.
ClientDisableCacheFor []client.Object
// DryRunClient specifies whether the client should be configured to enforce
// dryRun mode.
DryRunClient bool
// EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API.
// Use this to customize the event correlator and spam filter.
//
// Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers
// is shorter than the lifetime of your process.
EventBroadcaster record.EventBroadcaster
// GracefulShutdownTimeout is the duration given to each runnable to stop before the manager actually returns on stop.
// To disable graceful shutdown, set it to time.Duration(0).
// To use graceful shutdown without a timeout, set it to a negative duration, e.g. time.Duration(-1).
// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
GracefulShutdownTimeout *time.Duration
// Controller contains global configuration options for controllers
// registered within this manager.
// +optional
Controller v1alpha1.ControllerConfigurationSpec
// makeBroadcaster allows deferring the creation of the broadcaster to
// avoid leaking goroutines if we never call Start on this manager. It also
// returns whether or not this is an "owned" broadcaster, and as such should be
// stopped with the manager.
makeBroadcaster intrec.EventBroadcasterProducer
// Dependency injection for testing
newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error)
newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error)
newMetricsListener func(addr string) (net.Listener, error)
newHealthProbeListener func(addr string) (net.Listener, error)
}
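// A minimal usage sketch, assuming an existing *rest.Config (cfg) and a
// *runtime.Scheme (myScheme); the leader election ID and bind addresses below
// are placeholders, not required values.
//
//	mgr, err := manager.New(cfg, manager.Options{
//		Scheme:                     myScheme,
//		LeaderElection:             true,
//		LeaderElectionID:           "example-operator-lock",
//		LeaderElectionResourceLock: "leases",
//		MetricsBindAddress:         ":8080",
//		HealthProbeBindAddress:     ":8081",
//	})
//	if err != nil {
//		// handle startup error (e.g. log and exit)
//	}
//	_ = mgr
//
// As the SyncPeriod documentation above recommends, prefer requeueing from your
// Reconcile function over lowering SyncPeriod, e.g. by returning
// reconcile.Result{RequeueAfter: t} with a constant duration t.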
// BaseContextFunc is a function used to provide a base Context to Runnables
// managed by a Manager.
type BaseContextFunc func() context.Context
// Runnable allows a component to be started.
// It's very important that Start blocks until
// it's done running.
type Runnable interface {
// Start starts running the component. The component will stop running
// when the context is closed. Start blocks until the context is closed or
// an error occurs.
Start(context.Context) error
}
// RunnableFunc implements Runnable using a function.
// It's very important that the given function block
// until it's done running.
type RunnableFunc func(context.Context) error
// Start implements Runnable.
func (r RunnableFunc) Start(ctx context.Context) error {
return r(ctx)
}
// LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode.
type LeaderElectionRunnable interface {
// NeedLeaderElection returns true if the Runnable needs to be run in the leader election mode.
// e.g. controllers need to be run in leader election mode, while webhook server doesn't.
NeedLeaderElection() bool
}
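// A minimal sketch of wiring a Runnable into a Manager, assuming an existing
// Manager named mgr; the component below is hypothetical.
//
//	warmup := manager.RunnableFunc(func(ctx context.Context) error {
//		// do one-off warm-up work, then block until the manager stops
//		<-ctx.Done()
//		return nil
//	})
//	if err := mgr.Add(warmup); err != nil {
//		// handle registration error
//	}
//
// A runnable that also implements LeaderElectionRunnable and returns false from
// NeedLeaderElection is started immediately instead of waiting for leadership.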
// New returns a new Manager for creating Controllers.
func New(config *rest.Config, options Options) (Manager, error) {
// Set default values for options fields
options = setOptionsDefaults(options)
cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) {
clusterOptions.Scheme = options.Scheme
clusterOptions.MapperProvider = options.MapperProvider
clusterOptions.Logger = options.Logger
clusterOptions.SyncPeriod = options.SyncPeriod
clusterOptions.Namespace = options.Namespace
clusterOptions.NewCache = options.NewCache
clusterOptions.NewClient = options.NewClient
clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor
clusterOptions.DryRunClient = options.DryRunClient
clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck
})
if err != nil {
return nil, err
}
// Create the recorder provider to inject event recorders for the components.
// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific
// to the particular controller that it's being injected into, rather than a generic one like is here.
recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
if err != nil {
return nil, err
}
// Create the resource lock to enable leader election.
var leaderConfig *rest.Config
var leaderRecorderProvider *intrec.Provider
if options.LeaderElectionConfig == nil {
leaderConfig = rest.CopyConfig(config)
leaderRecorderProvider = recorderProvider
} else {
leaderConfig = rest.CopyConfig(options.LeaderElectionConfig)
leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
if err != nil {
return nil, err
}
}
resourceLock, err := options.newResourceLock(leaderConfig, leaderRecorderProvider, leaderelection.Options{
LeaderElection: options.LeaderElection,
LeaderElectionResourceLock: options.LeaderElectionResourceLock,
LeaderElectionID: options.LeaderElectionID,
LeaderElectionNamespace: options.LeaderElectionNamespace,
})
if err != nil {
return nil, err
}
// Create the metrics listener. This will return an error if the metrics bind
// address is invalid or already in use.
metricsListener, err := options.newMetricsListener(options.MetricsBindAddress)
if err != nil {
return nil, err
}
// By default we have no extra endpoints to expose on the metrics HTTP server.
metricsExtraHandlers := make(map[string]http.Handler)
// Create the health probes listener. This will return an error if the bind
// address is invalid or already in use.
healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress)
if err != nil {
return nil, err
}
errChan := make(chan error)
runnables := newRunnables(options.BaseContext, errChan)
return &controllerManager{
stopProcedureEngaged: pointer.Int64(0),
cluster: cluster,
runnables: runnables,
errChan: errChan,
recorderProvider: recorderProvider,
resourceLock: resourceLock,
metricsListener: metricsListener,
metricsExtraHandlers: metricsExtraHandlers,
controllerOptions: options.Controller,
logger: options.Logger,
elected: make(chan struct{}),
port: options.Port,
host: options.Host,
certDir: options.CertDir,
webhookServer: options.WebhookServer,
leaseDuration: *options.LeaseDuration,
renewDeadline: *options.RenewDeadline,
retryPeriod: *options.RetryPeriod,
healthProbeListener: healthProbeListener,
readinessEndpointName: options.ReadinessEndpointName,
livenessEndpointName: options.LivenessEndpointName,
gracefulShutdownTimeout: *options.GracefulShutdownTimeout,
internalProceduresStop: make(chan struct{}),
leaderElectionStopped: make(chan struct{}),
leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel,
}, nil
}
// AndFrom will use a supplied type and convert it to Options.
// For any option already set on Options, the value from the loaded config is ignored;
// this is used to allow CLI flags to override anything specified in the config file.
// (A usage sketch follows this function.)
func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) {
if inj, wantsScheme := loader.(inject.Scheme); wantsScheme {
err := inj.InjectScheme(o.Scheme)
if err != nil {
return o, err
}
}
newObj, err := loader.Complete()
if err != nil {
return o, err
}
o = o.setLeaderElectionConfig(newObj)
if o.SyncPeriod == nil && newObj.SyncPeriod != nil {
o.SyncPeriod = &newObj.SyncPeriod.Duration
}
if o.Namespace == "" && newObj.CacheNamespace != "" {
o.Namespace = newObj.CacheNamespace
}
if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" {
o.MetricsBindAddress = newObj.Metrics.BindAddress
}
if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" {
o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress
}
if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" {
o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName
}
if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" {
o.LivenessEndpointName = newObj.Health.LivenessEndpointName
}
if o.Port == 0 && newObj.Webhook.Port != nil {
o.Port = *newObj.Webhook.Port
}
if o.Host == "" && newObj.Webhook.Host != "" {
o.Host = newObj.Webhook.Host
}
if o.CertDir == "" && newObj.Webhook.CertDir != "" {
o.CertDir = newObj.Webhook.CertDir
}
if newObj.Controller != nil {
if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil {
o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout
}
if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 {
o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency
}
}
return o, nil
}
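// A minimal sketch of combining flag values with a config file, assuming the
// deferred file loader from sigs.k8s.io/controller-runtime/pkg/config and a
// flag-provided metricsAddr that should take precedence over the file:
//
//	opts := manager.Options{Scheme: myScheme, MetricsBindAddress: metricsAddr}
//	opts, err := opts.AndFrom(config.File().AtPath("/etc/controller/config.yaml"))
//	if err != nil {
//		// handle config file error
//	}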
// AndFromOrDie will use options.AndFrom() and will panic if there are errors.
func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options {
o, err := o.AndFrom(loader)
if err != nil {
panic(fmt.Sprintf("could not parse config file: %v", err))
}
return o
}
func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options {
if obj.LeaderElection == nil {
// The source does not have any configuration; noop
return o
}
if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil {
o.LeaderElection = *obj.LeaderElection.LeaderElect
}
if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" {
o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock
}
if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" {
o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace
}
if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" {
o.LeaderElectionID = obj.LeaderElection.ResourceName
}
if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) {
o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration
}
if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) {
o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration
}
if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) {
o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration
}
return o
}
// defaultHealthProbeListener creates the default health probes listener bound to the given address.
func defaultHealthProbeListener(addr string) (net.Listener, error) {
if addr == "" || addr == "0" {
return nil, nil
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return nil, fmt.Errorf("error listening on %s: %w", addr, err)
}
return ln, nil
}
// defaultBaseContext is used as the BaseContext value in Options if one
// has not already been set.
func defaultBaseContext() context.Context {
return context.Background()
}
// setOptionsDefaults set default values for Options fields.
func setOptionsDefaults(options Options) Options {
// Allow newResourceLock to be mocked
if options.newResourceLock == nil {
options.newResourceLock = leaderelection.NewResourceLock
}
// Allow newRecorderProvider to be mocked
if options.newRecorderProvider == nil {
options.newRecorderProvider = intrec.NewProvider
}
// This is duplicated with pkg/cluster; we need it here
// for leader election, and there to provide the user with
// an EventBroadcaster.
if options.EventBroadcaster == nil {
// defer initialization to avoid leaking by default
options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
return record.NewBroadcaster(), true
}
} else {
options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
return options.EventBroadcaster, false
}
}
if options.newMetricsListener == nil {
options.newMetricsListener = metrics.NewListener
}
leaseDuration, renewDeadline, retryPeriod := defaultLeaseDuration, defaultRenewDeadline, defaultRetryPeriod
if options.LeaseDuration == nil {
options.LeaseDuration = &leaseDuration
}
if options.RenewDeadline == nil {
options.RenewDeadline = &renewDeadline
}
if options.RetryPeriod == nil {
options.RetryPeriod = &retryPeriod
}
if options.ReadinessEndpointName == "" {
options.ReadinessEndpointName = defaultReadinessEndpoint
}
if options.LivenessEndpointName == "" {
options.LivenessEndpointName = defaultLivenessEndpoint
}
if options.newHealthProbeListener == nil {
options.newHealthProbeListener = defaultHealthProbeListener
}
if options.GracefulShutdownTimeout == nil {
gracefulShutdownTimeout := defaultGracefulShutdownPeriod
options.GracefulShutdownTimeout = &gracefulShutdownTimeout
}
if options.Logger.GetSink() == nil {
options.Logger = log.Log
}
if options.BaseContext == nil {
options.BaseContext = defaultBaseContext
}
return options
}


@@ -0,0 +1,297 @@
package manager
import (
"context"
"errors"
"sync"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var (
errRunnableGroupStopped = errors.New("can't accept new runnable as stop procedure is already engaged")
)
// readyRunnable encapsulates a runnable with
// a ready check.
type readyRunnable struct {
Runnable
Check runnableCheck
signalReady bool
}
// runnableCheck can be passed to Add() to let the runnable group determine that a
// runnable is ready. A runnable check should block until a runnable is ready,
// if the returned result is false, the runnable is considered not ready and failed.
type runnableCheck func(ctx context.Context) bool
// runnables handles all the runnables for a manager by grouping them according to their
// type (webhooks, caches, etc.).
type runnables struct {
Webhooks *runnableGroup
Caches *runnableGroup
LeaderElection *runnableGroup
Others *runnableGroup
}
// newRunnables creates a new runnables object.
func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables {
return &runnables{
Webhooks: newRunnableGroup(baseContext, errChan),
Caches: newRunnableGroup(baseContext, errChan),
LeaderElection: newRunnableGroup(baseContext, errChan),
Others: newRunnableGroup(baseContext, errChan),
}
}
// Add adds a runnable to the closest group of runnables that it belongs to.
//
// Add should be able to be called before and after Start, but not after StopAndWait.
// Add should return an error when called during StopAndWait.
// The runnables added before Start are started when Start is called.
// The runnables added after Start are started directly.
func (r *runnables) Add(fn Runnable) error {
switch runnable := fn.(type) {
case hasCache:
return r.Caches.Add(fn, func(ctx context.Context) bool {
return runnable.GetCache().WaitForCacheSync(ctx)
})
case *webhook.Server:
return r.Webhooks.Add(fn, nil)
case LeaderElectionRunnable:
if !runnable.NeedLeaderElection() {
return r.Others.Add(fn, nil)
}
return r.LeaderElection.Add(fn, nil)
default:
return r.LeaderElection.Add(fn, nil)
}
}
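// A minimal sketch of how this dispatch plays out for a hypothetical component
// that must not wait for leadership (the names below are illustrative only):
//
//	type metricsPusher struct{}
//
//	func (metricsPusher) Start(ctx context.Context) error { <-ctx.Done(); return nil }
//	func (metricsPusher) NeedLeaderElection() bool         { return false }
//
// Because NeedLeaderElection returns false, Add routes the component to the
// Others group, so it is started right away rather than after leadership is acquired.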
// runnableGroup manages a group of runnables that are
// meant to be running together until StopAndWait is called.
//
// Runnables can be added to a group after the group has started
// but not after it's stopped or while shutting down.
type runnableGroup struct {
ctx context.Context
cancel context.CancelFunc
start sync.Mutex
startOnce sync.Once
started bool
startQueue []*readyRunnable
startReadyCh chan *readyRunnable
stop sync.RWMutex
stopOnce sync.Once
stopped bool
// errChan is the error channel passed by the caller
// when the group is created.
// All errors are forwarded to this channel once they occur.
errChan chan error
// ch is the internal channel where the runnables are read off from.
ch chan *readyRunnable
// wg is an internal sync.WaitGroup that allows us to properly stop
// and wait for all the runnables to finish before returning.
wg *sync.WaitGroup
}
func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup {
r := &runnableGroup{
startReadyCh: make(chan *readyRunnable),
errChan: errChan,
ch: make(chan *readyRunnable),
wg: new(sync.WaitGroup),
}
r.ctx, r.cancel = context.WithCancel(baseContext())
return r
}
// Started returns true if the group has started.
func (r *runnableGroup) Started() bool {
r.start.Lock()
defer r.start.Unlock()
return r.started
}
// Start starts the group and waits for all
// initially registered runnables to start.
// It can only be called once, subsequent calls have no effect.
func (r *runnableGroup) Start(ctx context.Context) error {
var retErr error
r.startOnce.Do(func() {
defer close(r.startReadyCh)
// Start the internal reconciler.
go r.reconcile()
// Start the group and queue up all
// the runnables that were added prior.
r.start.Lock()
r.started = true
for _, rn := range r.startQueue {
rn.signalReady = true
r.ch <- rn
}
r.start.Unlock()
// If the start queue is empty, there is nothing to wait for; return.
if len(r.startQueue) == 0 {
return
}
// Wait for all runnables to signal.
for {
select {
case <-ctx.Done():
if err := ctx.Err(); !errors.Is(err, context.Canceled) {
retErr = err
}
case rn := <-r.startReadyCh:
for i, existing := range r.startQueue {
if existing == rn {
// Remove the item from the start queue.
r.startQueue = append(r.startQueue[:i], r.startQueue[i+1:]...)
break
}
}
// We're done waiting if the queue is empty, return.
if len(r.startQueue) == 0 {
return
}
}
}
})
return retErr
}
// reconcile is our main entrypoint for every runnable added
// to this group. Its primary job is to read off the internal channel
// and schedule runnables while tracking their state.
func (r *runnableGroup) reconcile() {
for runnable := range r.ch {
// Handle stop.
// If the shutdown has been called we want to avoid
// adding new goroutines to the WaitGroup because Wait()
// panics if Add() is called after it.
{
r.stop.RLock()
if r.stopped {
// Drop any runnables if we're stopped.
r.errChan <- errRunnableGroupStopped
r.stop.RUnlock()
continue
}
// Why is this here?
// When StopAndWait is called, if a runnable is in the process
// of being added, we could end up in a situation where
// the WaitGroup is incremented while StopAndWait has called Wait(),
// which would result in a panic.
r.wg.Add(1)
r.stop.RUnlock()
}
// Start the runnable.
go func(rn *readyRunnable) {
go func() {
if rn.Check(r.ctx) {
if rn.signalReady {
r.startReadyCh <- rn
}
}
}()
// If we return, the runnable ended cleanly
// or returned an error to the channel.
//
// We should always decrement the WaitGroup here.
defer r.wg.Done()
// Start the runnable.
if err := rn.Start(r.ctx); err != nil {
r.errChan <- err
}
}(runnable)
}
}
// Add should be able to be called before and after Start, but not after StopAndWait.
// Add should return an error when called during StopAndWait.
func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error {
r.stop.RLock()
if r.stopped {
r.stop.RUnlock()
return errRunnableGroupStopped
}
r.stop.RUnlock()
if ready == nil {
ready = func(_ context.Context) bool { return true }
}
readyRunnable := &readyRunnable{
Runnable: rn,
Check: ready,
}
// Handle start.
// If the overall runnable group isn't started yet
// we want to buffer the runnables and let Start()
// queue them up again later.
{
r.start.Lock()
// Check if we're already started.
if !r.started {
// Store the runnable in the internal start queue if not.
r.startQueue = append(r.startQueue, readyRunnable)
r.start.Unlock()
return nil
}
r.start.Unlock()
}
// Enqueue the runnable.
r.ch <- readyRunnable
return nil
}
// StopAndWait waits for all the runnables to finish before returning.
func (r *runnableGroup) StopAndWait(ctx context.Context) {
r.stopOnce.Do(func() {
// Close the reconciler channel once we're done.
defer close(r.ch)
_ = r.Start(ctx)
r.stop.Lock()
// Store the stopped variable so we don't accept any new
// runnables for the time being.
r.stopped = true
r.stop.Unlock()
// Cancel the internal context.
r.cancel()
done := make(chan struct{})
go func() {
defer close(done)
// Wait for all the runnables to finish.
r.wg.Wait()
}()
select {
case <-done:
// We're done, exit.
case <-ctx.Done():
// Calling context has expired, exit.
}
})
}
