mirror of
https://github.com/1Password/onepassword-operator.git
synced 2025-10-23 16:00:46 +00:00
Upgrade the operator to use Operator SDK v1.33.0 (#182)
* Move controller package inside internal directory
  Based on the go/v4 project structure, the following changed:
  - Package `controllers` is now named `controller`
  - Package `controller` now lives inside the new `internal` directory
* Move main.go into the cmd directory
  Based on the new go/v4 project structure, `main.go` now lives in the `cmd` directory.
* Change package import in main.go (see the cmd/main.go sketch after this list)
* Update go mod dependencies
  Update the dependencies based on the versions obtained by creating a new operator project with `kubebuilder init --domain onepassword.com --plugins=go/v4`. This follows the migration steps for moving from go/v3 to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).
* Update vendor
* Adjust code for breaking changes from pkg update
  The sigs.k8s.io/controller-runtime package had breaking changes between v0.14.5 and v0.16.3. This commit adapts the code to achieve the same behavior with the new functionality available (also covered by the cmd/main.go sketch after this list).
* Adjust paths to connect yaml files
  Since `main.go` is now in the `cmd` directory, the paths to the files for deploying Connect have to be adjusted based on the new location `main.go` is executed from.
* Update files based on new structure and scaffolding
  These changes follow the new go/v4 project structure and scaffolding, based on the migration steps for go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).
* Update config files
  These updates follow the Kustomize v4 syntax and are part of the upgrade to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).
* Update dependencies and Go version
* Update vendor
* Update Kubernetes tools versions
* Update operator version in Makefile
  The version in the Makefile now matches the version of the operator.
* Update Operator SDK version in version.go
* Adjust generated deepcopy
  The `+build` tag is no longer needed in the latest generated scaffolding, so it is removed (see the build-constraint snippet after this list).
* Update copyright year
* Bring back missing changes from migration
  Some Makefile customization was lost during the migration, specifically the namespace customization for the `make deploy` command. We also push changes to kustomization.yaml to make the deploy process smoother.
* Add RBAC perms for coordination.k8s.io
  With the latest changes to Kubernetes and Kustomize, the service account needs additional RBAC so that it can properly access the `leases` resource (see the RBAC marker sketch after this list).
* Optimize Dockerfile
  The Dockerfile had a step for caching dependencies (`go mod download`). That work is already covered by the vendor directory, which we include, so the step is removed to make image builds faster.
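For the import change and the controller-runtime breaking changes above, here is a minimal sketch of what cmd/main.go looks like after the migration. It assumes the module path github.com/1Password/onepassword-operator and a scaffolded reconciler named OnePasswordItemReconciler (both assumptions, not taken from this commit); in controller-runtime v0.16 the old MetricsBindAddress and Port manager options are replaced by the Metrics and WebhookServer options shown below.

package main

import (
    "os"

    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"
    metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
    "sigs.k8s.io/controller-runtime/pkg/webhook"

    // go/v4 layout: package "controller" under internal/ (previously "controllers" at the repo root).
    "github.com/1Password/onepassword-operator/internal/controller"
)

var scheme = runtime.NewScheme()

func main() {
    _ = clientgoscheme.AddToScheme(scheme)

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        Scheme:                 scheme,
        Metrics:                metricsserver.Options{BindAddress: ":8080"},    // was MetricsBindAddress
        WebhookServer:          webhook.NewServer(webhook.Options{Port: 9443}), // was Port
        HealthProbeBindAddress: ":8081",
        LeaderElection:         true,
        LeaderElectionID:       "onepassword-operator-lock", // illustrative ID, not taken from the repo
    })
    if err != nil {
        os.Exit(1)
    }

    // Reconciler wiring as scaffolded by kubebuilder; the type name is assumed for illustration.
    if err := (&controller.OnePasswordItemReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }).SetupWithManager(mgr); err != nil {
        os.Exit(1)
    }

    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        os.Exit(1)
    }
}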
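On the deepcopy point: current controller-gen output keeps only the //go:build constraint at the top of zz_generated.deepcopy.go. A minimal sketch of the file header, assuming the API package is named v1; the legacy "// +build" line that older scaffolding emitted alongside the //go:build line is the one being dropped.

//go:build !ignore_autogenerated

// Code generated by controller-gen. DO NOT EDIT.

package v1

// Older scaffolding also emitted "// +build !ignore_autogenerated" beneath the
// //go:build line; with Go 1.17+ build-constraint syntax that line is redundant.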
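For the coordination.k8s.io permissions, kubebuilder projects usually express RBAC as markers that controller-gen compiles into the role manifests under config/rbac. A minimal sketch of the rule the leader-election lease needs; the group and resource come from the commit message, while the marker placement and exact verb list are assumptions (the rule can equally be added straight to the kustomize RBAC manifests).

package controller

// Leader election in current client-go uses the coordination.k8s.io "leases"
// resource (the endpoints and configmaps locks are removed, as the vendored
// diff below shows), so the operator's service account needs access to it.
//+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete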
20  vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go  generated  vendored

@@ -64,9 +64,8 @@ import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/utils/clock"

"k8s.io/klog/v2"
"k8s.io/utils/clock"
)

const (
@@ -100,6 +99,11 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
if lec.Lock == nil {
return nil, fmt.Errorf("Lock must not be nil.")
}
id := lec.Lock.Identity()
if id == "" {
return nil, fmt.Errorf("Lock identity is empty")
}

le := LeaderElector{
config: lec,
clock: clock.RealClock{},
@@ -199,9 +203,7 @@ type LeaderElector struct {
// stopped holding the leader lease
func (le *LeaderElector) Run(ctx context.Context) {
defer runtime.HandleCrash()
defer func() {
le.config.Callbacks.OnStoppedLeading()
}()
defer le.config.Callbacks.OnStoppedLeading()

if !le.acquire(ctx) {
return // ctx signalled done
@@ -263,6 +265,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {

// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done.
func (le *LeaderElector) renew(ctx context.Context) {
defer le.config.Lock.RecordEvent("stopped leading")
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wait.Until(func() {
@@ -278,7 +281,6 @@ func (le *LeaderElector) renew(ctx context.Context) {
klog.V(5).Infof("successfully renewed lease %v", desc)
return
}
le.config.Lock.RecordEvent("stopped leading")
le.metrics.leaderOff(le.config.Name)
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
@@ -295,7 +297,7 @@ func (le *LeaderElector) release() bool {
if !le.IsLeader() {
return true
}
now := metav1.Now()
now := metav1.NewTime(le.clock.Now())
leaderElectionRecord := rl.LeaderElectionRecord{
LeaderTransitions: le.observedRecord.LeaderTransitions,
LeaseDurationSeconds: 1,
@@ -315,7 +317,7 @@ func (le *LeaderElector) release() bool {
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
now := metav1.Now()
now := metav1.NewTime(le.clock.Now())
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
@@ -347,7 +349,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
le.observedRawRecord = oldLeaderElectionRawRecord
}
if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
le.observedTime.Add(time.Second*time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).After(now.Time) &&
!le.IsLeader() {
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
126  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go  generated  vendored

@@ -1,126 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"context"
"encoding/json"
"errors"
"fmt"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// TODO: This is almost a exact replica of Endpoints lock.
// going forwards as we self host more and more components
// and use ConfigMaps as the means to pass that configuration
// data we will likely move to deprecate the Endpoints lock.

type configMapLock struct {
// ConfigMapMeta should contain a Name and a Namespace of a
// ConfigMapMeta object that the LeaderElector will attempt to lead.
ConfigMapMeta metav1.ObjectMeta
Client corev1client.ConfigMapsGetter
LockConfig ResourceLockConfig
cm *v1.ConfigMap
}

// Get returns the election record from a ConfigMap Annotation
func (cml *configMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
var record LeaderElectionRecord
cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, nil, err
}
cml.cm = cm
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
recordStr, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
recordBytes := []byte(recordStr)
if found {
if err := json.Unmarshal(recordBytes, &record); err != nil {
return nil, nil, err
}
}
return &record, recordBytes, nil
}

// Create attempts to create a LeaderElectionRecord annotation
func (cml *configMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(ctx, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: cml.ConfigMapMeta.Name,
Namespace: cml.ConfigMapMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
}, metav1.CreateOptions{})
return err
}

// Update will update an existing annotation on a given resource.
func (cml *configMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
if cml.cm == nil {
return errors.New("configmap not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
if err != nil {
return err
}
cml.cm = cm
return nil
}

// RecordEvent in leader election while adding meta-data
func (cml *configMapLock) RecordEvent(s string) {
if cml.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
subject := &v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}
// Populate the type meta, so we don't have to get it from the schema
subject.Kind = "ConfigMap"
subject.APIVersion = v1.SchemeGroupVersion.String()
cml.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
}

// Describe is used to convert details on current resource lock
// into a string
func (cml *configMapLock) Describe() string {
return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
}

// Identity returns the Identity of the lock
func (cml *configMapLock) Identity() string {
return cml.LockConfig.Identity
}
121  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go  generated  vendored

@@ -1,121 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"context"
"encoding/json"
"errors"
"fmt"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

type endpointsLock struct {
// EndpointsMeta should contain a Name and a Namespace of an
// Endpoints object that the LeaderElector will attempt to lead.
EndpointsMeta metav1.ObjectMeta
Client corev1client.EndpointsGetter
LockConfig ResourceLockConfig
e *v1.Endpoints
}

// Get returns the election record from a Endpoints Annotation
func (el *endpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
var record LeaderElectionRecord
ep, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, nil, err
}
el.e = ep
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
recordStr, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]
recordBytes := []byte(recordStr)
if found {
if err := json.Unmarshal(recordBytes, &record); err != nil {
return nil, nil, err
}
}
return &record, recordBytes, nil
}

// Create attempts to create a LeaderElectionRecord annotation
func (el *endpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(ctx, &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: el.EndpointsMeta.Name,
Namespace: el.EndpointsMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
}, metav1.CreateOptions{})
return err
}

// Update will update and existing annotation on a given resource.
func (el *endpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
if el.e == nil {
return errors.New("endpoint not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
if err != nil {
return err
}
el.e = e
return nil
}

// RecordEvent in leader election while adding meta-data
func (el *endpointsLock) RecordEvent(s string) {
if el.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
subject := &v1.Endpoints{ObjectMeta: el.e.ObjectMeta}
// Populate the type meta, so we don't have to get it from the schema
subject.Kind = "Endpoints"
subject.APIVersion = v1.SchemeGroupVersion.String()
el.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
}

// Describe is used to convert details on current resource lock
// into a string
func (el *endpointsLock) Describe() string {
return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
}

// Identity returns the Identity of the lock
func (el *endpointsLock) Identity() string {
return el.LockConfig.Identity
}
42  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go  generated  vendored

@@ -34,7 +34,7 @@ const (
endpointsResourceLock = "endpoints"
configMapsResourceLock = "configmaps"
LeasesResourceLock = "leases"
// When using EndpointsLeasesResourceLock, you need to ensure that
// When using endpointsLeasesResourceLock, you need to ensure that
// API Priority & Fairness is configured with non-default flow-schema
// that will catch the necessary operations on leader-election related
// endpoint objects.
@@ -67,8 +67,8 @@ const (
// serviceAccount:
//   name: '*'
//   namespace: kube-system
EndpointsLeasesResourceLock = "endpointsleases"
// When using EndpointsLeasesResourceLock, you need to ensure that
endpointsLeasesResourceLock = "endpointsleases"
// When using configMapsLeasesResourceLock, you need to ensure that
// API Priority & Fairness is configured with non-default flow-schema
// that will catch the necessary operations on leader-election related
// configmap objects.
@@ -101,7 +101,7 @@ const (
// serviceAccount:
//   name: '*'
//   namespace: kube-system
ConfigMapsLeasesResourceLock = "configmapsleases"
configMapsLeasesResourceLock = "configmapsleases"
)

// LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -164,22 +164,6 @@ type Interface interface {

// Manufacture will create a lock of a given type according to the input parameters
func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
endpointsLock := &endpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coreClient,
LockConfig: rlc,
}
configmapLock := &configMapLock{
ConfigMapMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coreClient,
LockConfig: rlc,
}
leaseLock := &LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
@@ -190,21 +174,15 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
}
switch lockType {
case endpointsResourceLock:
return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", EndpointsLeasesResourceLock)
return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock)
case configMapsResourceLock:
return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", ConfigMapsLeasesResourceLock)
return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock)
case LeasesResourceLock:
return leaseLock, nil
case EndpointsLeasesResourceLock:
return &MultiLock{
Primary: endpointsLock,
Secondary: leaseLock,
}, nil
case ConfigMapsLeasesResourceLock:
return &MultiLock{
Primary: configmapLock,
Secondary: leaseLock,
}, nil
case endpointsLeasesResourceLock:
return nil, fmt.Errorf("endpointsleases lock is removed, migrate to %s", LeasesResourceLock)
case configMapsLeasesResourceLock:
return nil, fmt.Errorf("configmapsleases lock is removed, migrated to %s", LeasesResourceLock)
default:
return nil, fmt.Errorf("Invalid lock-type %s", lockType)
}
8  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go  generated  vendored

@@ -117,10 +117,10 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec
r.LeaderTransitions = int(*spec.LeaseTransitions)
}
if spec.AcquireTime != nil {
r.AcquireTime = metav1.Time{spec.AcquireTime.Time}
r.AcquireTime = metav1.Time{Time: spec.AcquireTime.Time}
}
if spec.RenewTime != nil {
r.RenewTime = metav1.Time{spec.RenewTime.Time}
r.RenewTime = metav1.Time{Time: spec.RenewTime.Time}
}
return &r
@@ -132,8 +132,8 @@ func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.L
return coordinationv1.LeaseSpec{
HolderIdentity: &ler.HolderIdentity,
LeaseDurationSeconds: &leaseDurationSeconds,
AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{ler.RenewTime.Time},
AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time},
LeaseTransitions: &leaseTransitions,
}
}