From 2ba738ad50891da31e9c25427acfd71f508acbaf Mon Sep 17 00:00:00 2001 From: Forrest Babcock Date: Fri, 5 Dec 2025 14:22:20 -0500 Subject: [PATCH 1/6] Revert "Merge pull request #574 from tmshort/disable-pdb" This reverts commit cbbc65cbccd8d4955ed2c0b6937e7cb31d32c1b1, reversing changes made to 24cf078eb146425bd4b72ebe456ee4fc0a01a468. --- openshift/catalogd/manifests-experimental.yaml | 17 +++++++++++++++++ openshift/catalogd/manifests.yaml | 17 +++++++++++++++++ openshift/helm/catalogd.yaml | 2 -- openshift/helm/operator-controller.yaml | 2 -- .../manifests-experimental.yaml | 17 +++++++++++++++++ openshift/operator-controller/manifests.yaml | 17 +++++++++++++++++ 6 files changed, 68 insertions(+), 4 deletions(-) diff --git a/openshift/catalogd/manifests-experimental.yaml b/openshift/catalogd/manifests-experimental.yaml index 4dacdee86..06bd2abd8 100644 --- a/openshift/catalogd/manifests-experimental.yaml +++ b/openshift/catalogd/manifests-experimental.yaml @@ -65,6 +65,23 @@ spec: - Ingress - Egress --- +# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: catalogd-controller-manager + namespace: openshift-catalogd + labels: + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: olm + annotations: + olm.operatorframework.io/feature-set: experimental +spec: + minAvailable: 1 + selector: + matchLabels: + control-plane: catalogd-controller-manager +--- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/openshift/catalogd/manifests.yaml b/openshift/catalogd/manifests.yaml index 68b6c87f3..e197256bf 100644 --- a/openshift/catalogd/manifests.yaml +++ b/openshift/catalogd/manifests.yaml @@ -65,6 +65,23 @@ spec: - Ingress - Egress --- +# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: catalogd-controller-manager + namespace: openshift-catalogd + labels: + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: olm + annotations: + olm.operatorframework.io/feature-set: standard +spec: + minAvailable: 1 + selector: + matchLabels: + control-plane: catalogd-controller-manager +--- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/openshift/helm/catalogd.yaml b/openshift/helm/catalogd.yaml index 1e321bb52..8eeae6e12 100644 --- a/openshift/helm/catalogd.yaml +++ b/openshift/helm/catalogd.yaml @@ -8,8 +8,6 @@ options: enabled: true deployment: image: ${CATALOGD_IMAGE} - podDisruptionBudget: - enabled: false operatorController: enabled: false openshift: diff --git a/openshift/helm/operator-controller.yaml b/openshift/helm/operator-controller.yaml index ee6276a25..ff48c8fd1 100644 --- a/openshift/helm/operator-controller.yaml +++ b/openshift/helm/operator-controller.yaml @@ -8,8 +8,6 @@ options: enabled: true deployment: image: ${OPERATOR_CONTROLLER_IMAGE} - podDisruptionBudget: - enabled: false catalogd: enabled: false openshift: diff --git a/openshift/operator-controller/manifests-experimental.yaml b/openshift/operator-controller/manifests-experimental.yaml index 6ecb52ff2..e8893063e 100644 --- a/openshift/operator-controller/manifests-experimental.yaml +++ b/openshift/operator-controller/manifests-experimental.yaml @@ -61,6 +61,23 @@ spec: - Ingress - Egress --- +# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml 
+apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: operator-controller-controller-manager + namespace: openshift-operator-controller + labels: + app.kubernetes.io/name: operator-controller + app.kubernetes.io/part-of: olm + annotations: + olm.operatorframework.io/feature-set: experimental +spec: + minAvailable: 1 + selector: + matchLabels: + control-plane: operator-controller-controller-manager +--- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/openshift/operator-controller/manifests.yaml b/openshift/operator-controller/manifests.yaml index 091dfe26a..8d2be5ecf 100644 --- a/openshift/operator-controller/manifests.yaml +++ b/openshift/operator-controller/manifests.yaml @@ -61,6 +61,23 @@ spec: - Ingress - Egress --- +# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: operator-controller-controller-manager + namespace: openshift-operator-controller + labels: + app.kubernetes.io/name: operator-controller + app.kubernetes.io/part-of: olm + annotations: + olm.operatorframework.io/feature-set: standard +spec: + minAvailable: 1 + selector: + matchLabels: + control-plane: operator-controller-controller-manager +--- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount From da9278bcfa8bff57533cdf8f9e87c668eda4f1fe Mon Sep 17 00:00:00 2001 From: Forrest Babcock Date: Fri, 5 Dec 2025 14:24:44 -0500 Subject: [PATCH 2/6] Revert "Merge pull request #572 from openshift-bot/synchronize-upstream" This reverts commit 24cf078eb146425bd4b72ebe456ee4fc0a01a468, reversing changes made to 93bf7ab681583c170baa5dce41ae45bc96314b92. 
--- cmd/catalogd/main.go | 3 - cmd/operator-controller/main.go | 3 - commitchecker.yaml | 2 +- .../core/clustercatalog_controller.go | 22 +- .../operator-controller/applier/boxcutter.go | 8 - .../clusterextension_controller.go | 22 +- internal/shared/util/cache/transform.go | 91 ------- internal/shared/util/k8s/k8s.go | 54 ----- internal/shared/util/k8s/k8s_test.go | 227 ------------------ 9 files changed, 26 insertions(+), 406 deletions(-) delete mode 100644 internal/shared/util/cache/transform.go delete mode 100644 internal/shared/util/k8s/k8s.go delete mode 100644 internal/shared/util/k8s/k8s_test.go diff --git a/cmd/catalogd/main.go b/cmd/catalogd/main.go index af2463e2c..36f7b1675 100644 --- a/cmd/catalogd/main.go +++ b/cmd/catalogd/main.go @@ -59,7 +59,6 @@ import ( "github.com/operator-framework/operator-controller/internal/catalogd/storage" "github.com/operator-framework/operator-controller/internal/catalogd/webhook" sharedcontrollers "github.com/operator-framework/operator-controller/internal/shared/controllers" - cacheutil "github.com/operator-framework/operator-controller/internal/shared/util/cache" fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" httputil "github.com/operator-framework/operator-controller/internal/shared/util/http" imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" @@ -255,8 +254,6 @@ func run(ctx context.Context) error { cacheOptions := crcache.Options{ ByObject: map[client.Object]crcache.ByObject{}, - // Memory optimization: strip managed fields and large annotations from cached objects - DefaultTransform: cacheutil.StripManagedFieldsAndAnnotations(), } saKey, err := sautil.GetServiceAccount() diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go index c72ba60f2..5534244ac 100644 --- a/cmd/operator-controller/main.go +++ b/cmd/operator-controller/main.go @@ -78,7 +78,6 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render/registryv1" "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" sharedcontrollers "github.com/operator-framework/operator-controller/internal/shared/controllers" - cacheutil "github.com/operator-framework/operator-controller/internal/shared/util/cache" fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" httputil "github.com/operator-framework/operator-controller/internal/shared/util/http" imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" @@ -258,8 +257,6 @@ func run() error { cfg.systemNamespace: {LabelSelector: k8slabels.Everything()}, }, DefaultLabelSelector: k8slabels.Nothing(), - // Memory optimization: strip managed fields and large annotations from cached objects - DefaultTransform: cacheutil.StripAnnotations(), } if features.OperatorControllerFeatureGate.Enabled(features.BoxcutterRuntime) { diff --git a/commitchecker.yaml b/commitchecker.yaml index 6bb0b5fac..c580f04e4 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: 4e349e62c5314574f6194d64b1ff4508f2e9331f +expectedMergeBase: 34394ce0a7067e1f1622dc2c381a9a12439b10d2 upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/internal/catalogd/controllers/core/clustercatalog_controller.go b/internal/catalogd/controllers/core/clustercatalog_controller.go index 3d7fd935c..e968db7b9 100644 --- 
a/internal/catalogd/controllers/core/clustercatalog_controller.go +++ b/internal/catalogd/controllers/core/clustercatalog_controller.go @@ -41,7 +41,6 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/catalogd/storage" imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" - k8sutil "github.com/operator-framework/operator-controller/internal/shared/util/k8s" ) const ( @@ -108,7 +107,7 @@ func (r *ClusterCatalogReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Do checks before any Update()s, as Update() may modify the resource structure! updateStatus := !equality.Semantic.DeepEqual(existingCatsrc.Status, reconciledCatsrc.Status) updateFinalizers := !equality.Semantic.DeepEqual(existingCatsrc.Finalizers, reconciledCatsrc.Finalizers) - unexpectedFieldsChanged := k8sutil.CheckForUnexpectedFieldChange(&existingCatsrc, reconciledCatsrc) + unexpectedFieldsChanged := checkForUnexpectedFieldChange(existingCatsrc, *reconciledCatsrc) if unexpectedFieldsChanged { panic("spec or metadata changed by reconciler") @@ -116,8 +115,8 @@ func (r *ClusterCatalogReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Save the finalizers off to the side. If we update the status, the reconciledCatsrc will be updated // to contain the new state of the ClusterCatalog, which contains the status update, but (critically) - // does not contain the finalizers. After the status update, we will use the saved finalizers in the - // CreateOrPatch() + // does not contain the finalizers. After the status update, we need to re-add the finalizers to the + // reconciledCatsrc before updating the object. finalizers := reconciledCatsrc.Finalizers if updateStatus { @@ -126,12 +125,10 @@ func (r *ClusterCatalogReconciler) Reconcile(ctx context.Context, req ctrl.Reque } } + reconciledCatsrc.Finalizers = finalizers + if updateFinalizers { - // Use CreateOrPatch to update finalizers on the server - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, reconciledCatsrc, func() error { - reconciledCatsrc.Finalizers = finalizers - return nil - }); err != nil { + if err := r.Update(ctx, reconciledCatsrc); err != nil { reconcileErr = errors.Join(reconcileErr, fmt.Errorf("error updating finalizers: %v", err)) } } @@ -418,6 +415,13 @@ func (r *ClusterCatalogReconciler) needsPoll(lastSuccessfulPoll time.Time, catal return nextPoll.Before(time.Now()) } +// Compare resources - ignoring status & metadata.finalizers +func checkForUnexpectedFieldChange(a, b ocv1.ClusterCatalog) bool { + a.Status, b.Status = ocv1.ClusterCatalogStatus{}, ocv1.ClusterCatalogStatus{} + a.Finalizers, b.Finalizers = []string{}, []string{} + return !equality.Semantic.DeepEqual(a, b) +} + type finalizerFunc func(ctx context.Context, obj client.Object) (crfinalizer.Result, error) func (f finalizerFunc) Finalize(ctx context.Context, obj client.Object) (crfinalizer.Result, error) { diff --git a/internal/operator-controller/applier/boxcutter.go b/internal/operator-controller/applier/boxcutter.go index 3895b49df..6abcd0c43 100644 --- a/internal/operator-controller/applier/boxcutter.go +++ b/internal/operator-controller/applier/boxcutter.go @@ -27,7 +27,6 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" - "github.com/operator-framework/operator-controller/internal/shared/util/cache" ) const ( @@ -67,9 +66,6 @@ func (r 
*SimpleRevisionGenerator) GenerateRevisionFromHelmRelease( maps.Copy(labels, objectLabels) obj.SetLabels(labels) - // Memory optimization: strip large annotations - // Note: ApplyStripTransform never returns an error in practice - _ = cache.ApplyStripAnnotationsTransform(&obj) sanitizedUnstructured(ctx, &obj) objs = append(objs, ocv1.ClusterExtensionRevisionObject{ @@ -121,10 +117,6 @@ func (r *SimpleRevisionGenerator) GenerateRevision( unstr := unstructured.Unstructured{Object: unstrObj} unstr.SetGroupVersionKind(gvk) - // Memory optimization: strip large annotations - if err := cache.ApplyStripAnnotationsTransform(&unstr); err != nil { - return nil, err - } sanitizedUnstructured(ctx, &unstr) objs = append(objs, ocv1.ClusterExtensionRevisionObject{ diff --git a/internal/operator-controller/controllers/clusterextension_controller.go b/internal/operator-controller/controllers/clusterextension_controller.go index 2b2f0d532..ef8cbd5f6 100644 --- a/internal/operator-controller/controllers/clusterextension_controller.go +++ b/internal/operator-controller/controllers/clusterextension_controller.go @@ -35,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" crcontroller "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" crhandler "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" @@ -49,7 +48,6 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/conditionsets" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" - k8sutil "github.com/operator-framework/operator-controller/internal/shared/util/k8s" ) const ( @@ -137,28 +135,25 @@ func (r *ClusterExtensionReconciler) Reconcile(ctx context.Context, req ctrl.Req updateFinalizers := !equality.Semantic.DeepEqual(existingExt.Finalizers, reconciledExt.Finalizers) // If any unexpected fields have changed, panic before updating the resource - unexpectedFieldsChanged := k8sutil.CheckForUnexpectedFieldChange(existingExt, reconciledExt) + unexpectedFieldsChanged := checkForUnexpectedClusterExtensionFieldChange(*existingExt, *reconciledExt) if unexpectedFieldsChanged { panic("spec or metadata changed by reconciler") } // Save the finalizers off to the side. If we update the status, the reconciledExt will be updated // to contain the new state of the ClusterExtension, which contains the status update, but (critically) - // does not contain the finalizers. After the status update, we will use the saved finalizers in the - // CreateOrPatch() + // does not contain the finalizers. After the status update, we need to re-add the finalizers to the + // reconciledExt before updating the object. 
finalizers := reconciledExt.Finalizers if updateStatus { if err := r.Client.Status().Update(ctx, reconciledExt); err != nil { reconcileErr = errors.Join(reconcileErr, fmt.Errorf("error updating status: %v", err)) } } + reconciledExt.Finalizers = finalizers if updateFinalizers { - // Use CreateOrPatch to update finalizers on the server - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, reconciledExt, func() error { - reconciledExt.Finalizers = finalizers - return nil - }); err != nil { + if err := r.Update(ctx, reconciledExt); err != nil { reconcileErr = errors.Join(reconcileErr, fmt.Errorf("error updating finalizers: %v", err)) } } @@ -184,6 +179,13 @@ func ensureAllConditionsWithReason(ext *ocv1.ClusterExtension, reason v1alpha1.C } } +// Compare resources - ignoring status & metadata.finalizers +func checkForUnexpectedClusterExtensionFieldChange(a, b ocv1.ClusterExtension) bool { + a.Status, b.Status = ocv1.ClusterExtensionStatus{}, ocv1.ClusterExtensionStatus{} + a.Finalizers, b.Finalizers = []string{}, []string{} + return !equality.Semantic.DeepEqual(a, b) +} + // SetDeprecationStatus will set the appropriate deprecation statuses for a ClusterExtension // based on the provided bundle func SetDeprecationStatus(ext *ocv1.ClusterExtension, bundleName string, deprecation *declcfg.Deprecation) { diff --git a/internal/shared/util/cache/transform.go b/internal/shared/util/cache/transform.go deleted file mode 100644 index 50a553039..000000000 --- a/internal/shared/util/cache/transform.go +++ /dev/null @@ -1,91 +0,0 @@ -package cache - -import ( - "maps" - - toolscache "k8s.io/client-go/tools/cache" - crcache "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// stripAnnotations removes memory-heavy annotations that aren't needed for controller operations. -func stripAnnotations(obj interface{}) (interface{}, error) { - if metaObj, ok := obj.(client.Object); ok { - // Remove the last-applied-configuration annotation which can be very large - // Clone the annotations map to avoid modifying shared references - annotations := metaObj.GetAnnotations() - if annotations != nil { - annotations = maps.Clone(annotations) - delete(annotations, "kubectl.kubernetes.io/last-applied-configuration") - if len(annotations) == 0 { - metaObj.SetAnnotations(nil) - } else { - metaObj.SetAnnotations(annotations) - } - } - } - return obj, nil -} - -// StripManagedFieldsAndAnnotations returns a cache transform function that removes -// memory-heavy fields that aren't needed for controller operations. -// This significantly reduces memory usage in informer caches by removing: -// - Managed fields (can be several KB per object) -// - kubectl.kubernetes.io/last-applied-configuration annotation (can be very large) -// -// Use this function as a DefaultTransform in controller-runtime cache.Options -// to reduce memory overhead across all cached objects. 
-// -// Example: -// -// cacheOptions := cache.Options{ -// DefaultTransform: cacheutil.StripManagedFieldsAndAnnotations(), -// } -func StripManagedFieldsAndAnnotations() toolscache.TransformFunc { - // Use controller-runtime's built-in TransformStripManagedFields and compose it - // with our custom annotation stripping transform - managedFieldsTransform := crcache.TransformStripManagedFields() - - return func(obj interface{}) (interface{}, error) { - // First strip managed fields using controller-runtime's transform - obj, err := managedFieldsTransform(obj) - if err != nil { - return obj, err - } - - // Then strip the large annotations - return stripAnnotations(obj) - } -} - -// StripAnnotations returns a cache transform function that removes -// memory-heavy annotation fields that aren't needed for controller operations. -// This significantly reduces memory usage in informer caches by removing: -// - kubectl.kubernetes.io/last-applied-configuration annotation (can be very large) -// -// Use this function as a DefaultTransform in controller-runtime cache.Options -// to reduce memory overhead across all cached objects. -// -// Example: -// -// cacheOptions := cache.Options{ -// DefaultTransform: cacheutil.StripAnnotations(), -// } -func StripAnnotations() toolscache.TransformFunc { - return func(obj interface{}) (interface{}, error) { - // Strip the large annotations - return stripAnnotations(obj) - } -} - -// ApplyStripAnnotationsTransform applies the strip transform directly to an object. -// This is a convenience function for cases where you need to strip fields -// from an object outside of the cache transform context. -// -// Note: This function never returns an error in practice, but returns error -// to satisfy the TransformFunc interface. -func ApplyStripAnnotationsTransform(obj client.Object) error { - transform := StripAnnotations() - _, err := transform(obj) - return err -} diff --git a/internal/shared/util/k8s/k8s.go b/internal/shared/util/k8s/k8s.go deleted file mode 100644 index a8a51a78a..000000000 --- a/internal/shared/util/k8s/k8s.go +++ /dev/null @@ -1,54 +0,0 @@ -package k8s - -import ( - "reflect" - - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// CheckForUnexpectedFieldChange compares two Kubernetes objects and returns true -// if their annotations, labels, or spec have changed. This is useful for detecting -// unexpected modifications during reconciliation. -// -// The function compares: -// - Annotations (via GetAnnotations) -// - Labels (via GetLabels) -// - Spec (using reflection to access the Spec field, with semantic equality) -// -// Status and finalizers are intentionally not compared, as these are expected -// to change during reconciliation. -// -// This function uses reflection to access the Spec field, so no explicit GetSpec() -// method is required. The objects must have a field named "Spec". 
-func CheckForUnexpectedFieldChange(a, b metav1.Object) bool { - if !equality.Semantic.DeepEqual(a.GetAnnotations(), b.GetAnnotations()) { - return true - } - if !equality.Semantic.DeepEqual(a.GetLabels(), b.GetLabels()) { - return true - } - - // Use reflection to access the Spec field - aVal := reflect.ValueOf(a) - bVal := reflect.ValueOf(b) - - // Handle pointer types - if aVal.Kind() == reflect.Ptr { - aVal = aVal.Elem() - } - if bVal.Kind() == reflect.Ptr { - bVal = bVal.Elem() - } - - // Get the Spec field from both objects - aSpec := aVal.FieldByName("Spec") - bSpec := bVal.FieldByName("Spec") - - // If either Spec field is invalid, return false (no change detected) - if !aSpec.IsValid() || !bSpec.IsValid() { - return false - } - - return !equality.Semantic.DeepEqual(aSpec.Interface(), bSpec.Interface()) -} diff --git a/internal/shared/util/k8s/k8s_test.go b/internal/shared/util/k8s/k8s_test.go deleted file mode 100644 index b4f599633..000000000 --- a/internal/shared/util/k8s/k8s_test.go +++ /dev/null @@ -1,227 +0,0 @@ -package k8s - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" -) - -func TestCheckForUnexpectedFieldChange(t *testing.T) { - tests := []struct { - name string - a ocv1.ClusterExtension - b ocv1.ClusterExtension - expected bool - }{ - { - name: "no changes", - a: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - Finalizers: []string{"finalizer1"}, - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - }, - }, - Status: ocv1.ClusterExtensionStatus{ - Conditions: []metav1.Condition{ - {Type: "Ready", Status: metav1.ConditionTrue}, - }, - }, - }, - b: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - Finalizers: []string{"finalizer2"}, // Different finalizer should not trigger change - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - }, - }, - Status: ocv1.ClusterExtensionStatus{ - Conditions: []metav1.Condition{ - {Type: "Ready", Status: metav1.ConditionFalse}, // Different status should not trigger change - }, - }, - }, - expected: false, - }, - { - name: "annotation changed", - a: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value1"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterExtensionSpec{}, - }, - b: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value2"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterExtensionSpec{}, - }, - expected: true, - }, - { - name: "label changed", - a: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value1"}, - }, - Spec: ocv1.ClusterExtensionSpec{}, - }, - b: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value2"}, - }, - Spec: ocv1.ClusterExtensionSpec{}, - }, - expected: true, - }, - { - name: "spec changed", - a: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterExtensionSpec{ - 
Source: ocv1.SourceConfig{ - SourceType: "Catalog", - }, - }, - }, - b: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Image", - }, - }, - }, - expected: true, - }, - { - name: "status changed but annotations, labels, spec same", - a: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterExtensionSpec{}, - Status: ocv1.ClusterExtensionStatus{ - Conditions: []metav1.Condition{ - {Type: "Ready", Status: metav1.ConditionTrue}, - }, - }, - }, - b: ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterExtensionSpec{}, - Status: ocv1.ClusterExtensionStatus{ - Conditions: []metav1.Condition{ - {Type: "Ready", Status: metav1.ConditionFalse}, - }, - }, - }, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := CheckForUnexpectedFieldChange(&tt.a, &tt.b) - if result != tt.expected { - t.Errorf("CheckForUnexpectedFieldChange() = %v, want %v", result, tt.expected) - } - }) - } -} - -func TestCheckForUnexpectedFieldChangeWithClusterCatalog(t *testing.T) { - tests := []struct { - name string - a ocv1.ClusterCatalog - b ocv1.ClusterCatalog - expected bool - }{ - { - name: "no changes", - a: ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: "Image", - }, - }, - }, - b: ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: "Image", - }, - }, - }, - expected: false, - }, - { - name: "spec changed", - a: ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: "Image", - }, - }, - }, - b: ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"key": "value"}, - Labels: map[string]string{"label": "value"}, - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: "Git", - }, - }, - }, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := CheckForUnexpectedFieldChange(&tt.a, &tt.b) - if result != tt.expected { - t.Errorf("CheckForUnexpectedFieldChange() = %v, want %v", result, tt.expected) - } - }) - } -} From facd1e62d968fb3ea41af5a9c61d28976d0a0472 Mon Sep 17 00:00:00 2001 From: Forrest Babcock Date: Fri, 5 Dec 2025 14:25:33 -0500 Subject: [PATCH 3/6] Revert "Merge pull request #570 from openshift-bot/synchronize-upstream" This reverts commit 93bf7ab681583c170baa5dce41ae45bc96314b92, reversing changes made to 496965e52fbf2e391d294578f6d94a20011a3280. 
--- api/v1/clusterextensionrevision_types.go | 134 ++-------- commitchecker.yaml | 2 +- hack/test/install-prometheus.sh | 5 +- hack/tools/crd-generator/README.md | 20 -- hack/tools/crd-generator/main.go | 104 ++------ hack/tools/crd-generator/main_test.go | 247 ------------------ ...ramework.io_clusterextensionrevisions.yaml | 126 ++------- ...disruptionbudget-olmv1-system-catalogd.yml | 22 -- ...udget-olmv1-system-operator-controller.yml | 22 -- helm/olmv1/values.yaml | 6 - manifests/experimental-e2e.yaml | 160 ++---------- manifests/experimental.yaml | 160 ++---------- manifests/standard-e2e.yaml | 34 --- manifests/standard.yaml | 34 --- .../catalogd/manifests-experimental.yaml | 17 -- openshift/catalogd/manifests.yaml | 17 -- .../manifests-experimental.yaml | 17 -- openshift/operator-controller/manifests.yaml | 17 -- requirements.txt | 2 +- test/e2e/cluster_extension_install_test.go | 15 +- test/helpers/helpers.go | 18 +- 21 files changed, 135 insertions(+), 1044 deletions(-) delete mode 100644 helm/olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml delete mode 100644 helm/olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml diff --git a/api/v1/clusterextensionrevision_types.go b/api/v1/clusterextensionrevision_types.go index 69a116300..e048e1b54 100644 --- a/api/v1/clusterextensionrevision_types.go +++ b/api/v1/clusterextensionrevision_types.go @@ -24,15 +24,11 @@ import ( const ( ClusterExtensionRevisionKind = "ClusterExtensionRevision" - // ClusterExtensionRevisionTypeAvailable is the condition type that represents whether the - // ClusterExtensionRevision is available and has been successfully rolled out. + // Condition Types ClusterExtensionRevisionTypeAvailable = "Available" - - // ClusterExtensionRevisionTypeSucceeded is the condition type that represents whether the - // ClusterExtensionRevision rollout has succeeded. ClusterExtensionRevisionTypeSucceeded = "Succeeded" - // Condition reasons + // Condition Reasons ClusterExtensionRevisionReasonAvailable = "Available" ClusterExtensionRevisionReasonReconcileFailure = "ReconcileFailure" ClusterExtensionRevisionReasonRevisionValidationFailure = "RevisionValidationFailure" @@ -48,47 +44,22 @@ const ( // ClusterExtensionRevisionSpec defines the desired state of ClusterExtensionRevision. type ClusterExtensionRevisionSpec struct { - // lifecycleState specifies the lifecycle state of the ClusterExtensionRevision. - // - // When set to "Active" (the default), the revision is actively managed and reconciled. - // When set to "Archived", the revision is inactive and any resources not managed by a subsequent revision are deleted. - // The revision is removed from the owner list of all objects previously under management. - // All objects that did not transition to a succeeding revision are deleted. - // - // Once a revision is set to "Archived", it cannot be un-archived. + // Specifies the lifecycle state of the ClusterExtensionRevision. 
// // +kubebuilder:default="Active" - // +kubebuilder:validation:Enum=Active;Archived - // +kubebuilder:validation:XValidation:rule="oldSelf == 'Active' || oldSelf == 'Archived' && oldSelf == self", message="cannot un-archive" + // +kubebuilder:validation:Enum=Active;Paused;Archived + // +kubebuilder:validation:XValidation:rule="oldSelf == 'Active' || oldSelf == 'Paused' || oldSelf == 'Archived' && oldSelf == self", message="can not un-archive" LifecycleState ClusterExtensionRevisionLifecycleState `json:"lifecycleState,omitempty"` - - // revision is a required, immutable sequence number representing a specific revision - // of the parent ClusterExtension. - // - // The revision field must be a positive integer. - // Each ClusterExtensionRevision belonging to the same parent ClusterExtension must have a unique revision number. - // The revision number must always be the previous revision number plus one, or 1 for the first revision. + // Revision is a sequence number representing a specific revision of the ClusterExtension instance. + // Must be positive. Each ClusterExtensionRevision of the same parent ClusterExtension needs to have + // a unique value assigned. It is immutable after creation. The new revision number must always be previous revision +1. // // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum:=1 // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="revision is immutable" Revision int64 `json:"revision"` - - // phases is an optional, immutable list of phases that group objects to be applied together. - // - // Objects are organized into phases based on their Group-Kind. Common phases include: - // - namespaces: Namespace objects - // - policies: ResourceQuota, LimitRange, NetworkPolicy objects - // - rbac: ServiceAccount, Role, RoleBinding, ClusterRole, ClusterRoleBinding objects - // - crds: CustomResourceDefinition objects - // - storage: PersistentVolume, PersistentVolumeClaim, StorageClass objects - // - deploy: Deployment, StatefulSet, DaemonSet, Service, ConfigMap, Secret objects - // - publish: Ingress, APIService, Route, Webhook objects - // - // All objects in a phase are applied in no particular order. - // The revision progresses to the next phase only after all objects in the current phase pass their readiness probes. - // - // Once set, even if empty, the phases field is immutable. + // Phases are groups of objects that will be applied at the same time. + // All objects in the phase will have to pass their probes in order to progress to the next phase. // // +kubebuilder:validation:XValidation:rule="self == oldSelf || oldSelf.size() == 0", message="phases is immutable" // +listType=map @@ -104,62 +75,33 @@ const ( // ClusterExtensionRevisionLifecycleStateActive / "Active" is the default lifecycle state. ClusterExtensionRevisionLifecycleStateActive ClusterExtensionRevisionLifecycleState = "Active" // ClusterExtensionRevisionLifecycleStatePaused / "Paused" disables reconciliation of the ClusterExtensionRevision. - // Object changes will not be reconciled. However, status updates will be propagated. + // Only Status updates will still propagated, but object changes will not be reconciled. ClusterExtensionRevisionLifecycleStatePaused ClusterExtensionRevisionLifecycleState = "Paused" - // ClusterExtensionRevisionLifecycleStateArchived / "Archived" archives the revision for historical or auditing purposes. 
- // The revision is removed from the owner list of all other objects previously under management and all objects - // that did not transition to a succeeding revision are deleted. + // ClusterExtensionRevisionLifecycleStateArchived / "Archived" disables reconciliation while also "scaling to zero", + // which deletes all objects that are not excluded via the pausedFor property and + // removes itself from the owner list of all other objects previously under management. ClusterExtensionRevisionLifecycleStateArchived ClusterExtensionRevisionLifecycleState = "Archived" ) -// ClusterExtensionRevisionPhase represents a group of objects that are applied together. The phase is considered -// complete only after all objects pass their status probes. +// ClusterExtensionRevisionPhase are groups of objects that will be applied at the same time. +// All objects in the a phase will have to pass their probes in order to progress to the next phase. type ClusterExtensionRevisionPhase struct { - // name is a required identifier for this phase. - // - // phase names must follow the DNS label standard as defined in [RFC 1123]. - // They must contain only lowercase alphanumeric characters or hyphens (-), - // start and end with an alphanumeric character, and be no longer than 63 characters. - // - // Common phase names include: namespaces, policies, rbac, crds, storage, deploy, publish. - // - // [RFC 1123]: https://tools.ietf.org/html/rfc1123 + // Name identifies this phase. // // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-z]([-a-z0-9]*[a-z0-9])?$` Name string `json:"name"` - - // objects is a required list of all Kubernetes objects that belong to this phase. - // - // All objects in this list are applied to the cluster in no particular order. + // Objects are a list of all the objects within this phase. Objects []ClusterExtensionRevisionObject `json:"objects"` } -// ClusterExtensionRevisionObject represents a Kubernetes object to be applied as part -// of a phase, along with its collision protection settings. +// ClusterExtensionRevisionObject contains an object and settings for it. type ClusterExtensionRevisionObject struct { - // object is a required embedded Kubernetes object to be applied. - // - // This object must be a valid Kubernetes resource with apiVersion, kind, and metadata fields. - // // +kubebuilder:validation:EmbeddedResource // +kubebuilder:pruning:PreserveUnknownFields Object unstructured.Unstructured `json:"object"` - - // collisionProtection controls whether the operator can adopt and modify objects - // that already exist on the cluster. - // - // When set to "Prevent" (the default), the operator only manages objects it created itself. - // This prevents ownership collisions. - // - // When set to "IfNoController", the operator can adopt and modify pre-existing objects - // that are not owned by another controller. - // This is useful for taking over management of manually-created resources. - // - // When set to "None", the operator can adopt and modify any pre-existing object, even if - // owned by another controller. - // Use this setting with extreme caution as it may cause multiple controllers to fight over - // the same resource, resulting in increased load on the API server and etcd. + // CollisionProtection controls whether OLM can adopt and modify objects + // already existing on the cluster or even owned by another controller. 
// // +kubebuilder:default="Prevent" // +kubebuilder:validation:Enum=Prevent;IfNoController;None @@ -186,27 +128,6 @@ const ( // ClusterExtensionRevisionStatus defines the observed state of a ClusterExtensionRevision. type ClusterExtensionRevisionStatus struct { - // conditions is an optional list of status conditions describing the state of the - // ClusterExtensionRevision. - // - // The Progressing condition represents whether the revision is actively rolling out: - // - When status is True and reason is Progressing, the revision rollout is actively making progress and is in transition. - // - When Progressing is not present, the revision is not currently in transition. - // - // The Available condition represents whether the revision has been successfully rolled out and is available: - // - When status is True and reason is Available, the revision has been successfully rolled out and all objects pass their readiness probes. - // - When status is False and reason is Incomplete, the revision rollout has not yet completed but no specific failures have been detected. - // - When status is False and reason is ProbeFailure, one or more objects are failing their readiness probes during rollout. - // - When status is False and reason is ReconcileFailure, the revision has encountered a general reconciliation failure. - // - When status is False and reason is RevisionValidationFailure, the revision failed preflight validation checks. - // - When status is False and reason is PhaseValidationError, a phase within the revision failed preflight validation checks. - // - When status is False and reason is ObjectCollisions, objects in the revision collide with existing cluster objects that cannot be adopted. - // - When status is Unknown and reason is Archived, the revision has been archived and its objects have been torn down. - // - When status is Unknown and reason is Migrated, the revision was migrated from an existing release and object status probe results have not yet been observed. - // - // The Succeeded condition represents whether the revision has successfully completed its rollout: - // - When status is True and reason is RolloutSuccess, the revision has successfully completed its rollout. This condition is set once and persists even if the revision later becomes unavailable. - // // +listType=map // +listMapKey=type // +optional @@ -216,24 +137,19 @@ type ClusterExtensionRevisionStatus struct { // +kubebuilder:object:root=true // +kubebuilder:resource:scope=Cluster // +kubebuilder:subresource:status + +// ClusterExtensionRevision is the Schema for the clusterextensionrevisions API // +kubebuilder:printcolumn:name="Available",type=string,JSONPath=`.status.conditions[?(@.type=='Available')].status` // +kubebuilder:printcolumn:name=Age,type=date,JSONPath=`.metadata.creationTimestamp` - -// ClusterExtensionRevision represents an immutable snapshot of Kubernetes objects -// for a specific version of a ClusterExtension. Each revision contains objects -// organized into phases that roll out sequentially. The same object can only be managed by a single revision -// at a time. Ownership of objects is transitioned from one revision to the next as the extension is upgraded -// or reconfigured. Once the latest revision has rolled out successfully, previous active revisions are archived for -// posterity. type ClusterExtensionRevision struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - // spec defines the desired state of the ClusterExtensionRevision. 
+ // spec is an optional field that defines the desired state of the ClusterExtension. // +optional Spec ClusterExtensionRevisionSpec `json:"spec,omitempty"` - // status is optional and defines the observed state of the ClusterExtensionRevision. + // status is an optional field that defines the observed state of the ClusterExtension. // +optional Status ClusterExtensionRevisionStatus `json:"status,omitempty"` } diff --git a/commitchecker.yaml b/commitchecker.yaml index c580f04e4..0b1a24c57 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: 34394ce0a7067e1f1622dc2c381a9a12439b10d2 +expectedMergeBase: 045989d84a7570b1cfddeee47eae64d47245aff2 upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/hack/test/install-prometheus.sh b/hack/test/install-prometheus.sh index e2e13c96f..f458b2d01 100755 --- a/hack/test/install-prometheus.sh +++ b/hack/test/install-prometheus.sh @@ -38,11 +38,8 @@ echo "Patching namespace to ${PROMETHEUS_NAMESPACE}..." echo "Applying Prometheus base..." kubectl apply -k "$TMPDIR" --server-side -echo "Waiting for Prometheus Operator deployment to become available..." -kubectl wait --for=condition=Available deployment/prometheus-operator -n "$PROMETHEUS_NAMESPACE" --timeout=180s - echo "Waiting for Prometheus Operator pod to become ready..." -kubectl wait --for=condition=Ready pod -n "$PROMETHEUS_NAMESPACE" -l app.kubernetes.io/name=prometheus-operator --timeout=120s +kubectl wait --for=condition=Ready pod -n "$PROMETHEUS_NAMESPACE" -l app.kubernetes.io/name=prometheus-operator echo "Applying prometheus Helm chart..." ${HELM} template prometheus helm/prometheus ${PROMETHEUS_VALUES} | sed "s/cert-git-version/cert-${VERSION}/g" | kubectl apply -f - diff --git a/hack/tools/crd-generator/README.md b/hack/tools/crd-generator/README.md index 83fb63e21..433472167 100644 --- a/hack/tools/crd-generator/README.md +++ b/hack/tools/crd-generator/README.md @@ -33,14 +33,6 @@ A semi-colon separated list of enumerations, similar to the `+kubebuilder:valida An XValidation scheme, similar to the `+kubebuilder:validation:XValidation` scheme, but more limited. -* `Optional` - -Indicating that this field should not be listed as required in its parent. - -* `Required` - -Indicating that this field should be listed as required in its parent. - ## Experimental Description * Start Tag: `` @@ -52,18 +44,6 @@ All text between the tags is included in the experimental CRD, but removed from This is only useful if the field is included in the standard CRD, but there's additional meaning in the experimental CRD when feature gates are enabled. -## Standard Description - -* Start Tag: `` -* End Tag: `` - -Descriptive text that is only included as part of the field description within the standard CRD. -All text between the tags is included in the standard CRD, but removed from the experimental CRD. - -This is useful if the field is included in the standard CRD and has differing meaning than when the -field is used in the experimental CRD when feature gates are enabled. 
- - ## Exclude from CRD Description * Start Tag: `` diff --git a/hack/tools/crd-generator/main.go b/hack/tools/crd-generator/main.go index edc254494..9687489f4 100644 --- a/hack/tools/crd-generator/main.go +++ b/hack/tools/crd-generator/main.go @@ -23,7 +23,6 @@ import ( "log" "os" "regexp" - "slices" "strings" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -137,7 +136,7 @@ func runGenerator(args ...string) { if channel == StandardChannel && strings.Contains(version.Name, "alpha") { channelCrd.Spec.Versions[i].Served = false } - channelCrd.Spec.Versions[i].Schema.OpenAPIV3Schema.Properties = opconTweaksMap(channel, channelCrd.Spec.Versions[i].Schema.OpenAPIV3Schema) + version.Schema.OpenAPIV3Schema.Properties = opconTweaksMap(channel, version.Schema.OpenAPIV3Schema.Properties) } conv, err := crd.AsVersion(*channelCrd, apiextensionsv1.SchemeGroupVersion) @@ -180,51 +179,25 @@ func runGenerator(args ...string) { } } -// Apply Opcon specific tweaks to all properties in a map, and update the parent schema's required list according to opcon tags. -// For opcon validation optional/required tags, the parent schema's required list is mutated directly. -// TODO: if we need to support other conditions from opconTweaks, it will likely be preferable to convey the parent schema to facilitate direct alteration. -func opconTweaksMap(channel string, parentSchema *apiextensionsv1.JSONSchemaProps) map[string]apiextensionsv1.JSONSchemaProps { - props := parentSchema.Properties - +func opconTweaksMap(channel string, props map[string]apiextensionsv1.JSONSchemaProps) map[string]apiextensionsv1.JSONSchemaProps { for name := range props { jsonProps := props[name] - p, reqStatus := opconTweaks(channel, name, jsonProps) + p := opconTweaks(channel, name, jsonProps) if p == nil { delete(props, name) } else { props[name] = *p - // Update required list based on tag - switch reqStatus { - case statusRequired: - if !slices.Contains(parentSchema.Required, name) { - parentSchema.Required = append(parentSchema.Required, name) - } - case statusOptional: - parentSchema.Required = slices.DeleteFunc(parentSchema.Required, func(s string) bool { return s == name }) - default: - // "" (unspecified) means keep existing status - } } } return props } -const ( - statusRequired = "required" - statusOptional = "optional" - statusNoOpinion = "" -) - // Custom Opcon API Tweaks for tags prefixed with `") { - return nil, statusNoOpinion + return nil } } @@ -246,7 +219,7 @@ func opconTweaks(channel string, name string, jsonProps apiextensionsv1.JSONSche numValid++ jsonProps.Enum = []apiextensionsv1.JSON{} - for val := range strings.SplitSeq(enumMatch[1], ";") { + for _, val := range strings.Split(enumMatch[1], ";") { jsonProps.Enum = append(jsonProps.Enum, apiextensionsv1.JSON{Raw: []byte("\"" + val + "\"")}) } } @@ -264,28 +237,6 @@ func opconTweaks(channel string, name string, jsonProps apiextensionsv1.JSONSche Rule: celMatch[2], }) } - optReqRe := regexp.MustCompile(validationPrefix + "(Optional|Required)>") - optReqMatches := optReqRe.FindAllStringSubmatch(jsonProps.Description, 64) - hasOptional := false - hasRequired := false - for _, optReqMatch := range optReqMatches { - if len(optReqMatch) != 2 { - log.Fatalf("Invalid %s Optional/Required tag for %s", validationPrefix, name) - } - - numValid++ - switch optReqMatch[1] { - case "Optional": - hasOptional = true - requiredStatus = statusOptional - case "Required": - hasRequired = true - requiredStatus = statusRequired - } - } - if hasOptional && 
hasRequired { - log.Fatalf("Field %s has both Optional and Required validation tags for channel %s", name, channel) - } } if numValid < numExpressions { @@ -295,43 +246,34 @@ func opconTweaks(channel string, name string, jsonProps apiextensionsv1.JSONSche jsonProps.Description = formatDescription(jsonProps.Description, channel, name) if len(jsonProps.Properties) > 0 { - jsonProps.Properties = opconTweaksMap(channel, &jsonProps) + jsonProps.Properties = opconTweaksMap(channel, jsonProps.Properties) } else if jsonProps.Items != nil && jsonProps.Items.Schema != nil { - jsonProps.Items.Schema, _ = opconTweaks(channel, name, *jsonProps.Items.Schema) + jsonProps.Items.Schema = opconTweaks(channel, name, *jsonProps.Items.Schema) } - return &jsonProps, requiredStatus + return &jsonProps } func formatDescription(description string, channel string, name string) string { - tagset := []struct { - channel string - tag string - }{ - {channel: ExperimentalChannel, tag: "opcon:standard:description"}, - {channel: StandardChannel, tag: "opcon:experimental:description"}, - } - for _, ts := range tagset { - startTag := fmt.Sprintf("<%s>", ts.tag) - endTag := fmt.Sprintf("", ts.tag) - if channel == ts.channel && strings.Contains(description, ts.tag) { - regexPattern := `\n*` + regexp.QuoteMeta(startTag) + `(?s:(.*?))` + regexp.QuoteMeta(endTag) + `\n*` - re := regexp.MustCompile(regexPattern) - match := re.FindStringSubmatch(description) - if len(match) != 2 { - log.Fatalf("Invalid %s tag for %s", startTag, name) - } - description = re.ReplaceAllString(description, "\n\n") - } else { - description = strings.ReplaceAll(description, startTag, "") - description = strings.ReplaceAll(description, endTag, "") + startTag := "" + endTag := "" + if channel == StandardChannel && strings.Contains(description, startTag) { + regexPattern := `\n*` + regexp.QuoteMeta(startTag) + `(?s:(.*?))` + regexp.QuoteMeta(endTag) + `\n*` + re := regexp.MustCompile(regexPattern) + match := re.FindStringSubmatch(description) + if len(match) != 2 { + log.Fatalf("Invalid tag for %s", name) } + description = re.ReplaceAllString(description, "\n\n") + } else { + description = strings.ReplaceAll(description, startTag, "") + description = strings.ReplaceAll(description, endTag, "") } // Comments within "opcon:util:excludeFromCRD" tag are not included in the generated CRD and all trailing \n operators before // and after the tags are removed and replaced with three \n operators. 
- startTag := "" - endTag := "" + startTag = "" + endTag = "" if strings.Contains(description, startTag) { regexPattern := `\n*` + regexp.QuoteMeta(startTag) + `(?s:(.*?))` + regexp.QuoteMeta(endTag) + `\n*` re := regexp.MustCompile(regexPattern) diff --git a/hack/tools/crd-generator/main_test.go b/hack/tools/crd-generator/main_test.go index 99f71a497..d2eb28d61 100644 --- a/hack/tools/crd-generator/main_test.go +++ b/hack/tools/crd-generator/main_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/stretchr/testify/require" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) const controllerToolsVersion = "v0.19.0" @@ -76,252 +75,6 @@ func TestTags(t *testing.T) { compareFiles(t, f1, f2) } -func TestFormatDescription(t *testing.T) { - tests := []struct { - name string - channel string - fieldName string - input string - expected string - }{ - { - name: "standard channel removes experimental description", - channel: StandardChannel, - fieldName: "testField", - input: "Base description.\n\nExperimental content.\n\nMore content.", - expected: "Base description.\n\nMore content.", - }, - { - name: "experimental channel removes standard description", - channel: ExperimentalChannel, - fieldName: "testField", - input: "Base description.\n\nStandard content.\n\nMore content.", - expected: "Base description.\n\nMore content.", - }, - { - name: "excludeFromCRD tag removes content", - channel: StandardChannel, - fieldName: "testField", - input: "Before.\n\n\nExcluded content.\n\n\nAfter.", - expected: "Before.\n\nAfter.", - }, - { - name: "three hyphens removes trailing content", - channel: StandardChannel, - fieldName: "testField", - input: "Visible content.\n---\nHidden content after separator.", - expected: "Visible content.", - }, - { - name: "multiple newlines collapsed to double", - channel: StandardChannel, - fieldName: "testField", - input: "Line one.\n\n\n\n\nLine two.", - expected: "Line one.\n\nLine two.", - }, - { - name: "trailing newlines removed", - channel: StandardChannel, - fieldName: "testField", - input: "Content with trailing newlines.\n\n\n", - expected: "Content with trailing newlines.", - }, - { - name: "combined tags and formatting", - channel: ExperimentalChannel, - fieldName: "testField", - input: "Main text.\n\nStandard only.\n\n\n\n\nInternal notes.\n\n\nFinal text.\n\n\n", - expected: "Main text.\n\nFinal text.", - }, - { - name: "empty input", - channel: StandardChannel, - fieldName: "testField", - input: "", - expected: "", - }, - { - name: "no tags plain text", - channel: StandardChannel, - fieldName: "testField", - input: "Simple description without any tags.", - expected: "Simple description without any tags.", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := formatDescription(tt.input, tt.channel, tt.fieldName) - require.Equal(t, tt.expected, result) - }) - } -} - -// TestOpconTweaksOptionalRequired tests the opconTweaks function for handling -// optional and required tags in field descriptions. 
-func TestOpconTweaksOptionalRequired(t *testing.T) { - tests := []struct { - name string - channel string - fieldName string - description string - expectedStatus string - }{ - { - name: "optional tag in standard channel", - channel: StandardChannel, - fieldName: "testField", - description: "Field description.\n", - expectedStatus: statusOptional, - }, - { - name: "required tag in standard channel", - channel: StandardChannel, - fieldName: "testField", - description: "Field description.\n", - expectedStatus: statusRequired, - }, - { - name: "optional tag in experimental channel", - channel: ExperimentalChannel, - fieldName: "testField", - description: "Field description.\n", - expectedStatus: statusOptional, - }, - { - name: "required tag in experimental channel", - channel: ExperimentalChannel, - fieldName: "testField", - description: "Field description.\n", - expectedStatus: statusRequired, - }, - { - name: "no validation tag", - channel: StandardChannel, - fieldName: "testField", - description: "Field description without tags.", - expectedStatus: statusNoOpinion, - }, - { - name: "experimental tag in standard channel ignored", - channel: StandardChannel, - fieldName: "testField", - description: "Field description.\n", - expectedStatus: statusNoOpinion, - }, - { - name: "standard tag in experimental channel ignored", - channel: ExperimentalChannel, - fieldName: "testField", - description: "Field description.\n", - expectedStatus: statusNoOpinion, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - jsonProps := apiextensionsv1.JSONSchemaProps{ - Description: tt.description, - Type: "string", - } - _, status := opconTweaks(tt.channel, tt.fieldName, jsonProps) - require.Equal(t, tt.expectedStatus, status) - }) - } -} - -// TestOpconTweaksMapRequiredList tests the opconTweaksMap function for correctly -// updating the required list based on field descriptions. 
-func TestOpconTweaksMapRequiredList(t *testing.T) { - tests := []struct { - name string - channel string - props map[string]apiextensionsv1.JSONSchemaProps - existingRequired []string - expectedRequired []string - }{ - { - name: "add field to required list if not required but opcon required", - channel: StandardChannel, - props: map[string]apiextensionsv1.JSONSchemaProps{ - "field1": { - Description: "Field 1.\n", - Type: "string", - }, - }, - existingRequired: []string{}, - expectedRequired: []string{"field1"}, - }, - { - name: "remove field from required list if required but opcon optional", - channel: StandardChannel, - props: map[string]apiextensionsv1.JSONSchemaProps{ - "field1": { - Description: "Field 1.\n", - Type: "string", - }, - }, - existingRequired: []string{"field1"}, - expectedRequired: []string{}, - }, - { - name: "preserve existing required without overriding opcon tag", - channel: StandardChannel, - props: map[string]apiextensionsv1.JSONSchemaProps{ - "field1": { - Description: "Field 1 without tag.", - Type: "string", - }, - }, - existingRequired: []string{"field1"}, - expectedRequired: []string{"field1"}, - }, - { - name: "multiple fields with mixed optional/required tags", - channel: StandardChannel, - props: map[string]apiextensionsv1.JSONSchemaProps{ - "field1": { - Description: "Field 1.\n", - Type: "string", - }, - "field2": { - Description: "Field 2.\n", - Type: "string", - }, - "field3": { - Description: "Field 3 without tag.", - Type: "string", - }, - }, - existingRequired: []string{"field2", "field3"}, - expectedRequired: []string{"field3", "field1"}, - }, - { - name: "no duplicate in required list when tag/opcon-tag both required", - channel: StandardChannel, - props: map[string]apiextensionsv1.JSONSchemaProps{ - "field1": { - Description: "Field 1.\n", - Type: "string", - }, - }, - existingRequired: []string{"field1"}, - expectedRequired: []string{"field1"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parentSchema := &apiextensionsv1.JSONSchemaProps{ - Properties: tt.props, - Required: tt.existingRequired, - } - opconTweaksMap(tt.channel, parentSchema) - require.ElementsMatch(t, tt.expectedRequired, parentSchema.Required) - }) - } -} - func compareFiles(t *testing.T, file1, file2 string) { f1, err := os.Open(file1) require.NoError(t, err) diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml index 1a3a8b021..b25e57903 100644 --- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml +++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml @@ -25,13 +25,8 @@ spec: name: v1 schema: openAPIV3Schema: - description: |- - ClusterExtensionRevision represents an immutable snapshot of Kubernetes objects - for a specific version of a ClusterExtension. Each revision contains objects - organized into phases that roll out sequentially. The same object can only be managed by a single revision - at a time. Ownership of objects is transitioned from one revision to the next as the extension is upgraded - or reconfigured. Once the latest revision has rolled out successfully, previous active revisions are archived for - posterity. 
+ description: ClusterExtensionRevision is the Schema for the clusterextensionrevisions + API properties: apiVersion: description: |- @@ -51,100 +46,53 @@ spec: metadata: type: object spec: - description: spec defines the desired state of the ClusterExtensionRevision. + description: spec is an optional field that defines the desired state + of the ClusterExtension. properties: lifecycleState: default: Active - description: |- - lifecycleState specifies the lifecycle state of the ClusterExtensionRevision. - - When set to "Active" (the default), the revision is actively managed and reconciled. - When set to "Archived", the revision is inactive and any resources not managed by a subsequent revision are deleted. - The revision is removed from the owner list of all objects previously under management. - All objects that did not transition to a succeeding revision are deleted. - - Once a revision is set to "Archived", it cannot be un-archived. + description: Specifies the lifecycle state of the ClusterExtensionRevision. enum: - Active + - Paused - Archived type: string x-kubernetes-validations: - - message: cannot un-archive - rule: oldSelf == 'Active' || oldSelf == 'Archived' && oldSelf == - self + - message: can not un-archive + rule: oldSelf == 'Active' || oldSelf == 'Paused' || oldSelf == 'Archived' + && oldSelf == self phases: description: |- - phases is an optional, immutable list of phases that group objects to be applied together. - - Objects are organized into phases based on their Group-Kind. Common phases include: - - namespaces: Namespace objects - - policies: ResourceQuota, LimitRange, NetworkPolicy objects - - rbac: ServiceAccount, Role, RoleBinding, ClusterRole, ClusterRoleBinding objects - - crds: CustomResourceDefinition objects - - storage: PersistentVolume, PersistentVolumeClaim, StorageClass objects - - deploy: Deployment, StatefulSet, DaemonSet, Service, ConfigMap, Secret objects - - publish: Ingress, APIService, Route, Webhook objects - - All objects in a phase are applied in no particular order. - The revision progresses to the next phase only after all objects in the current phase pass their readiness probes. - - Once set, even if empty, the phases field is immutable. + Phases are groups of objects that will be applied at the same time. + All objects in the phase will have to pass their probes in order to progress to the next phase. items: description: |- - ClusterExtensionRevisionPhase represents a group of objects that are applied together. The phase is considered - complete only after all objects pass their status probes. + ClusterExtensionRevisionPhase are groups of objects that will be applied at the same time. + All objects in the a phase will have to pass their probes in order to progress to the next phase. properties: name: - description: |- - name is a required identifier for this phase. - - phase names must follow the DNS label standard as defined in [RFC 1123]. - They must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters. - - Common phase names include: namespaces, policies, rbac, crds, storage, deploy, publish. - - [RFC 1123]: https://tools.ietf.org/html/rfc1123 + description: Name identifies this phase. maxLength: 63 pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ type: string objects: - description: |- - objects is a required list of all Kubernetes objects that belong to this phase. - - All objects in this list are applied to the cluster in no particular order. 
+ description: Objects are a list of all the objects within this + phase. items: - description: |- - ClusterExtensionRevisionObject represents a Kubernetes object to be applied as part - of a phase, along with its collision protection settings. + description: ClusterExtensionRevisionObject contains an object + and settings for it. properties: collisionProtection: default: Prevent description: |- - collisionProtection controls whether the operator can adopt and modify objects - that already exist on the cluster. - - When set to "Prevent" (the default), the operator only manages objects it created itself. - This prevents ownership collisions. - - When set to "IfNoController", the operator can adopt and modify pre-existing objects - that are not owned by another controller. - This is useful for taking over management of manually-created resources. - - When set to "None", the operator can adopt and modify any pre-existing object, even if - owned by another controller. - Use this setting with extreme caution as it may cause multiple controllers to fight over - the same resource, resulting in increased load on the API server and etcd. + CollisionProtection controls whether OLM can adopt and modify objects + already existing on the cluster or even owned by another controller. enum: - Prevent - IfNoController - None type: string object: - description: |- - object is a required embedded Kubernetes object to be applied. - - This object must be a valid Kubernetes resource with apiVersion, kind, and metadata fields. type: object x-kubernetes-embedded-resource: true x-kubernetes-preserve-unknown-fields: true @@ -165,12 +113,9 @@ spec: rule: self == oldSelf || oldSelf.size() == 0 revision: description: |- - revision is a required, immutable sequence number representing a specific revision - of the parent ClusterExtension. - - The revision field must be a positive integer. - Each ClusterExtensionRevision belonging to the same parent ClusterExtension must have a unique revision number. - The revision number must always be the previous revision number plus one, or 1 for the first revision. + Revision is a sequence number representing a specific revision of the ClusterExtension instance. + Must be positive. Each ClusterExtensionRevision of the same parent ClusterExtension needs to have + a unique value assigned. It is immutable after creation. The new revision number must always be previous revision +1. format: int64 minimum: 1 type: integer @@ -181,31 +126,10 @@ spec: - revision type: object status: - description: status is optional and defines the observed state of the - ClusterExtensionRevision. + description: status is an optional field that defines the observed state + of the ClusterExtension. properties: conditions: - description: |- - conditions is an optional list of status conditions describing the state of the - ClusterExtensionRevision. - - The Progressing condition represents whether the revision is actively rolling out: - - When status is True and reason is Progressing, the revision rollout is actively making progress and is in transition. - - When Progressing is not present, the revision is not currently in transition. - - The Available condition represents whether the revision has been successfully rolled out and is available: - - When status is True and reason is Available, the revision has been successfully rolled out and all objects pass their readiness probes. 
- - When status is False and reason is Incomplete, the revision rollout has not yet completed but no specific failures have been detected. - - When status is False and reason is ProbeFailure, one or more objects are failing their readiness probes during rollout. - - When status is False and reason is ReconcileFailure, the revision has encountered a general reconciliation failure. - - When status is False and reason is RevisionValidationFailure, the revision failed preflight validation checks. - - When status is False and reason is PhaseValidationError, a phase within the revision failed preflight validation checks. - - When status is False and reason is ObjectCollisions, objects in the revision collide with existing cluster objects that cannot be adopted. - - When status is Unknown and reason is Archived, the revision has been archived and its objects have been torn down. - - When status is Unknown and reason is Migrated, the revision was migrated from an existing release and object status probe results have not yet been observed. - - The Succeeded condition represents whether the revision has successfully completed its rollout: - - When status is True and reason is RolloutSuccess, the revision has successfully completed its rollout. This condition is set once and persists even if the revision later becomes unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/helm/olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml b/helm/olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml deleted file mode 100644 index 7e6ec2e33..000000000 --- a/helm/olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and .Values.options.catalogd.enabled .Values.options.catalogd.podDisruptionBudget.enabled }} -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: catalogd-controller-manager - namespace: {{ .Values.namespaces.olmv1.name }} - labels: - app.kubernetes.io/name: catalogd - {{- include "olmv1.labels" . | nindent 4 }} - annotations: - {{- include "olmv1.annotations" . | nindent 4 }} -spec: - {{- if .Values.options.catalogd.podDisruptionBudget.minAvailable }} - minAvailable: {{ .Values.options.catalogd.podDisruptionBudget.minAvailable }} - {{- end }} - {{- if .Values.options.catalogd.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ .Values.options.catalogd.podDisruptionBudget.maxUnavailable }} - {{- end }} - selector: - matchLabels: - control-plane: catalogd-controller-manager -{{- end }} diff --git a/helm/olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml b/helm/olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml deleted file mode 100644 index 0d98d528f..000000000 --- a/helm/olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and .Values.options.operatorController.enabled .Values.options.operatorController.podDisruptionBudget.enabled }} -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: operator-controller-controller-manager - namespace: {{ .Values.namespaces.olmv1.name }} - labels: - app.kubernetes.io/name: operator-controller - {{- include "olmv1.labels" . | nindent 4 }} - annotations: - {{- include "olmv1.annotations" . 
| nindent 4 }} -spec: - {{- if .Values.options.operatorController.podDisruptionBudget.minAvailable }} - minAvailable: {{ .Values.options.operatorController.podDisruptionBudget.minAvailable }} - {{- end }} - {{- if .Values.options.operatorController.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ .Values.options.operatorController.podDisruptionBudget.maxUnavailable }} - {{- end }} - selector: - matchLabels: - control-plane: operator-controller-controller-manager -{{- end }} diff --git a/helm/olmv1/values.yaml b/helm/olmv1/values.yaml index cb454f625..5ab9d7672 100644 --- a/helm/olmv1/values.yaml +++ b/helm/olmv1/values.yaml @@ -12,9 +12,6 @@ options: features: enabled: [] disabled: [] - podDisruptionBudget: - enabled: true - minAvailable: 1 catalogd: enabled: true deployment: @@ -23,9 +20,6 @@ options: features: enabled: [] disabled: [] - podDisruptionBudget: - enabled: true - minAvailable: 1 certManager: enabled: false e2e: diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml index e536cd72a..672830225 100644 --- a/manifests/experimental-e2e.yaml +++ b/manifests/experimental-e2e.yaml @@ -87,40 +87,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: catalogd-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: catalogd - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: experimental-e2e -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: catalogd-controller-manager ---- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: operator-controller-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: operator-controller - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: experimental-e2e -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: operator-controller-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount @@ -650,13 +616,8 @@ spec: name: v1 schema: openAPIV3Schema: - description: |- - ClusterExtensionRevision represents an immutable snapshot of Kubernetes objects - for a specific version of a ClusterExtension. Each revision contains objects - organized into phases that roll out sequentially. The same object can only be managed by a single revision - at a time. Ownership of objects is transitioned from one revision to the next as the extension is upgraded - or reconfigured. Once the latest revision has rolled out successfully, previous active revisions are archived for - posterity. + description: ClusterExtensionRevision is the Schema for the clusterextensionrevisions + API properties: apiVersion: description: |- @@ -676,100 +637,53 @@ spec: metadata: type: object spec: - description: spec defines the desired state of the ClusterExtensionRevision. + description: spec is an optional field that defines the desired state + of the ClusterExtension. properties: lifecycleState: default: Active - description: |- - lifecycleState specifies the lifecycle state of the ClusterExtensionRevision. - - When set to "Active" (the default), the revision is actively managed and reconciled. - When set to "Archived", the revision is inactive and any resources not managed by a subsequent revision are deleted. 
- The revision is removed from the owner list of all objects previously under management. - All objects that did not transition to a succeeding revision are deleted. - - Once a revision is set to "Archived", it cannot be un-archived. + description: Specifies the lifecycle state of the ClusterExtensionRevision. enum: - Active + - Paused - Archived type: string x-kubernetes-validations: - - message: cannot un-archive - rule: oldSelf == 'Active' || oldSelf == 'Archived' && oldSelf == - self + - message: can not un-archive + rule: oldSelf == 'Active' || oldSelf == 'Paused' || oldSelf == 'Archived' + && oldSelf == self phases: description: |- - phases is an optional, immutable list of phases that group objects to be applied together. - - Objects are organized into phases based on their Group-Kind. Common phases include: - - namespaces: Namespace objects - - policies: ResourceQuota, LimitRange, NetworkPolicy objects - - rbac: ServiceAccount, Role, RoleBinding, ClusterRole, ClusterRoleBinding objects - - crds: CustomResourceDefinition objects - - storage: PersistentVolume, PersistentVolumeClaim, StorageClass objects - - deploy: Deployment, StatefulSet, DaemonSet, Service, ConfigMap, Secret objects - - publish: Ingress, APIService, Route, Webhook objects - - All objects in a phase are applied in no particular order. - The revision progresses to the next phase only after all objects in the current phase pass their readiness probes. - - Once set, even if empty, the phases field is immutable. + Phases are groups of objects that will be applied at the same time. + All objects in the phase will have to pass their probes in order to progress to the next phase. items: description: |- - ClusterExtensionRevisionPhase represents a group of objects that are applied together. The phase is considered - complete only after all objects pass their status probes. + ClusterExtensionRevisionPhase are groups of objects that will be applied at the same time. + All objects in the a phase will have to pass their probes in order to progress to the next phase. properties: name: - description: |- - name is a required identifier for this phase. - - phase names must follow the DNS label standard as defined in [RFC 1123]. - They must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters. - - Common phase names include: namespaces, policies, rbac, crds, storage, deploy, publish. - - [RFC 1123]: https://tools.ietf.org/html/rfc1123 + description: Name identifies this phase. maxLength: 63 pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ type: string objects: - description: |- - objects is a required list of all Kubernetes objects that belong to this phase. - - All objects in this list are applied to the cluster in no particular order. + description: Objects are a list of all the objects within this + phase. items: - description: |- - ClusterExtensionRevisionObject represents a Kubernetes object to be applied as part - of a phase, along with its collision protection settings. + description: ClusterExtensionRevisionObject contains an object + and settings for it. properties: collisionProtection: default: Prevent description: |- - collisionProtection controls whether the operator can adopt and modify objects - that already exist on the cluster. - - When set to "Prevent" (the default), the operator only manages objects it created itself. - This prevents ownership collisions. 
- - When set to "IfNoController", the operator can adopt and modify pre-existing objects - that are not owned by another controller. - This is useful for taking over management of manually-created resources. - - When set to "None", the operator can adopt and modify any pre-existing object, even if - owned by another controller. - Use this setting with extreme caution as it may cause multiple controllers to fight over - the same resource, resulting in increased load on the API server and etcd. + CollisionProtection controls whether OLM can adopt and modify objects + already existing on the cluster or even owned by another controller. enum: - Prevent - IfNoController - None type: string object: - description: |- - object is a required embedded Kubernetes object to be applied. - - This object must be a valid Kubernetes resource with apiVersion, kind, and metadata fields. type: object x-kubernetes-embedded-resource: true x-kubernetes-preserve-unknown-fields: true @@ -790,12 +704,9 @@ spec: rule: self == oldSelf || oldSelf.size() == 0 revision: description: |- - revision is a required, immutable sequence number representing a specific revision - of the parent ClusterExtension. - - The revision field must be a positive integer. - Each ClusterExtensionRevision belonging to the same parent ClusterExtension must have a unique revision number. - The revision number must always be the previous revision number plus one, or 1 for the first revision. + Revision is a sequence number representing a specific revision of the ClusterExtension instance. + Must be positive. Each ClusterExtensionRevision of the same parent ClusterExtension needs to have + a unique value assigned. It is immutable after creation. The new revision number must always be previous revision +1. format: int64 minimum: 1 type: integer @@ -806,31 +717,10 @@ spec: - revision type: object status: - description: status is optional and defines the observed state of the - ClusterExtensionRevision. + description: status is an optional field that defines the observed state + of the ClusterExtension. properties: conditions: - description: |- - conditions is an optional list of status conditions describing the state of the - ClusterExtensionRevision. - - The Progressing condition represents whether the revision is actively rolling out: - - When status is True and reason is Progressing, the revision rollout is actively making progress and is in transition. - - When Progressing is not present, the revision is not currently in transition. - - The Available condition represents whether the revision has been successfully rolled out and is available: - - When status is True and reason is Available, the revision has been successfully rolled out and all objects pass their readiness probes. - - When status is False and reason is Incomplete, the revision rollout has not yet completed but no specific failures have been detected. - - When status is False and reason is ProbeFailure, one or more objects are failing their readiness probes during rollout. - - When status is False and reason is ReconcileFailure, the revision has encountered a general reconciliation failure. - - When status is False and reason is RevisionValidationFailure, the revision failed preflight validation checks. - - When status is False and reason is PhaseValidationError, a phase within the revision failed preflight validation checks. - - When status is False and reason is ObjectCollisions, objects in the revision collide with existing cluster objects that cannot be adopted. 
- - When status is Unknown and reason is Archived, the revision has been archived and its objects have been torn down. - - When status is Unknown and reason is Migrated, the revision was migrated from an existing release and object status probe results have not yet been observed. - - The Succeeded condition represents whether the revision has successfully completed its rollout: - - When status is True and reason is RolloutSuccess, the revision has successfully completed its rollout. This condition is set once and persists even if the revision later becomes unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml index f88debab0..199838eac 100644 --- a/manifests/experimental.yaml +++ b/manifests/experimental.yaml @@ -87,40 +87,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: catalogd-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: catalogd - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: experimental -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: catalogd-controller-manager ---- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: operator-controller-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: operator-controller - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: experimental -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: operator-controller-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount @@ -615,13 +581,8 @@ spec: name: v1 schema: openAPIV3Schema: - description: |- - ClusterExtensionRevision represents an immutable snapshot of Kubernetes objects - for a specific version of a ClusterExtension. Each revision contains objects - organized into phases that roll out sequentially. The same object can only be managed by a single revision - at a time. Ownership of objects is transitioned from one revision to the next as the extension is upgraded - or reconfigured. Once the latest revision has rolled out successfully, previous active revisions are archived for - posterity. + description: ClusterExtensionRevision is the Schema for the clusterextensionrevisions + API properties: apiVersion: description: |- @@ -641,100 +602,53 @@ spec: metadata: type: object spec: - description: spec defines the desired state of the ClusterExtensionRevision. + description: spec is an optional field that defines the desired state + of the ClusterExtension. properties: lifecycleState: default: Active - description: |- - lifecycleState specifies the lifecycle state of the ClusterExtensionRevision. - - When set to "Active" (the default), the revision is actively managed and reconciled. - When set to "Archived", the revision is inactive and any resources not managed by a subsequent revision are deleted. - The revision is removed from the owner list of all objects previously under management. - All objects that did not transition to a succeeding revision are deleted. - - Once a revision is set to "Archived", it cannot be un-archived. 
+ description: Specifies the lifecycle state of the ClusterExtensionRevision. enum: - Active + - Paused - Archived type: string x-kubernetes-validations: - - message: cannot un-archive - rule: oldSelf == 'Active' || oldSelf == 'Archived' && oldSelf == - self + - message: can not un-archive + rule: oldSelf == 'Active' || oldSelf == 'Paused' || oldSelf == 'Archived' + && oldSelf == self phases: description: |- - phases is an optional, immutable list of phases that group objects to be applied together. - - Objects are organized into phases based on their Group-Kind. Common phases include: - - namespaces: Namespace objects - - policies: ResourceQuota, LimitRange, NetworkPolicy objects - - rbac: ServiceAccount, Role, RoleBinding, ClusterRole, ClusterRoleBinding objects - - crds: CustomResourceDefinition objects - - storage: PersistentVolume, PersistentVolumeClaim, StorageClass objects - - deploy: Deployment, StatefulSet, DaemonSet, Service, ConfigMap, Secret objects - - publish: Ingress, APIService, Route, Webhook objects - - All objects in a phase are applied in no particular order. - The revision progresses to the next phase only after all objects in the current phase pass their readiness probes. - - Once set, even if empty, the phases field is immutable. + Phases are groups of objects that will be applied at the same time. + All objects in the phase will have to pass their probes in order to progress to the next phase. items: description: |- - ClusterExtensionRevisionPhase represents a group of objects that are applied together. The phase is considered - complete only after all objects pass their status probes. + ClusterExtensionRevisionPhase are groups of objects that will be applied at the same time. + All objects in the a phase will have to pass their probes in order to progress to the next phase. properties: name: - description: |- - name is a required identifier for this phase. - - phase names must follow the DNS label standard as defined in [RFC 1123]. - They must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters. - - Common phase names include: namespaces, policies, rbac, crds, storage, deploy, publish. - - [RFC 1123]: https://tools.ietf.org/html/rfc1123 + description: Name identifies this phase. maxLength: 63 pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ type: string objects: - description: |- - objects is a required list of all Kubernetes objects that belong to this phase. - - All objects in this list are applied to the cluster in no particular order. + description: Objects are a list of all the objects within this + phase. items: - description: |- - ClusterExtensionRevisionObject represents a Kubernetes object to be applied as part - of a phase, along with its collision protection settings. + description: ClusterExtensionRevisionObject contains an object + and settings for it. properties: collisionProtection: default: Prevent description: |- - collisionProtection controls whether the operator can adopt and modify objects - that already exist on the cluster. - - When set to "Prevent" (the default), the operator only manages objects it created itself. - This prevents ownership collisions. - - When set to "IfNoController", the operator can adopt and modify pre-existing objects - that are not owned by another controller. - This is useful for taking over management of manually-created resources. 
- - When set to "None", the operator can adopt and modify any pre-existing object, even if - owned by another controller. - Use this setting with extreme caution as it may cause multiple controllers to fight over - the same resource, resulting in increased load on the API server and etcd. + CollisionProtection controls whether OLM can adopt and modify objects + already existing on the cluster or even owned by another controller. enum: - Prevent - IfNoController - None type: string object: - description: |- - object is a required embedded Kubernetes object to be applied. - - This object must be a valid Kubernetes resource with apiVersion, kind, and metadata fields. type: object x-kubernetes-embedded-resource: true x-kubernetes-preserve-unknown-fields: true @@ -755,12 +669,9 @@ spec: rule: self == oldSelf || oldSelf.size() == 0 revision: description: |- - revision is a required, immutable sequence number representing a specific revision - of the parent ClusterExtension. - - The revision field must be a positive integer. - Each ClusterExtensionRevision belonging to the same parent ClusterExtension must have a unique revision number. - The revision number must always be the previous revision number plus one, or 1 for the first revision. + Revision is a sequence number representing a specific revision of the ClusterExtension instance. + Must be positive. Each ClusterExtensionRevision of the same parent ClusterExtension needs to have + a unique value assigned. It is immutable after creation. The new revision number must always be previous revision +1. format: int64 minimum: 1 type: integer @@ -771,31 +682,10 @@ spec: - revision type: object status: - description: status is optional and defines the observed state of the - ClusterExtensionRevision. + description: status is an optional field that defines the observed state + of the ClusterExtension. properties: conditions: - description: |- - conditions is an optional list of status conditions describing the state of the - ClusterExtensionRevision. - - The Progressing condition represents whether the revision is actively rolling out: - - When status is True and reason is Progressing, the revision rollout is actively making progress and is in transition. - - When Progressing is not present, the revision is not currently in transition. - - The Available condition represents whether the revision has been successfully rolled out and is available: - - When status is True and reason is Available, the revision has been successfully rolled out and all objects pass their readiness probes. - - When status is False and reason is Incomplete, the revision rollout has not yet completed but no specific failures have been detected. - - When status is False and reason is ProbeFailure, one or more objects are failing their readiness probes during rollout. - - When status is False and reason is ReconcileFailure, the revision has encountered a general reconciliation failure. - - When status is False and reason is RevisionValidationFailure, the revision failed preflight validation checks. - - When status is False and reason is PhaseValidationError, a phase within the revision failed preflight validation checks. - - When status is False and reason is ObjectCollisions, objects in the revision collide with existing cluster objects that cannot be adopted. - - When status is Unknown and reason is Archived, the revision has been archived and its objects have been torn down. 
- - When status is Unknown and reason is Migrated, the revision was migrated from an existing release and object status probe results have not yet been observed. - - The Succeeded condition represents whether the revision has successfully completed its rollout: - - When status is True and reason is RolloutSuccess, the revision has successfully completed its rollout. This condition is set once and persists even if the revision later becomes unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/manifests/standard-e2e.yaml b/manifests/standard-e2e.yaml index 1aed38ba9..5c9590784 100644 --- a/manifests/standard-e2e.yaml +++ b/manifests/standard-e2e.yaml @@ -87,40 +87,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: catalogd-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: catalogd - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: standard-e2e -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: catalogd-controller-manager ---- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: operator-controller-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: operator-controller - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: standard-e2e -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: operator-controller-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/manifests/standard.yaml b/manifests/standard.yaml index 34cc57918..95e400c26 100644 --- a/manifests/standard.yaml +++ b/manifests/standard.yaml @@ -87,40 +87,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: catalogd-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: catalogd - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: standard -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: catalogd-controller-manager ---- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: operator-controller-controller-manager - namespace: olmv1-system - labels: - app.kubernetes.io/name: operator-controller - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: standard -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: operator-controller-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/openshift/catalogd/manifests-experimental.yaml b/openshift/catalogd/manifests-experimental.yaml index 06bd2abd8..4dacdee86 100644 --- a/openshift/catalogd/manifests-experimental.yaml +++ b/openshift/catalogd/manifests-experimental.yaml @@ -65,23 +65,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: catalogd-controller-manager - 
namespace: openshift-catalogd - labels: - app.kubernetes.io/name: catalogd - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: experimental -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: catalogd-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/openshift/catalogd/manifests.yaml b/openshift/catalogd/manifests.yaml index e197256bf..68b6c87f3 100644 --- a/openshift/catalogd/manifests.yaml +++ b/openshift/catalogd/manifests.yaml @@ -65,23 +65,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-catalogd.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: catalogd-controller-manager - namespace: openshift-catalogd - labels: - app.kubernetes.io/name: catalogd - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: standard -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: catalogd-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/openshift/operator-controller/manifests-experimental.yaml b/openshift/operator-controller/manifests-experimental.yaml index e8893063e..6ecb52ff2 100644 --- a/openshift/operator-controller/manifests-experimental.yaml +++ b/openshift/operator-controller/manifests-experimental.yaml @@ -61,23 +61,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: operator-controller-controller-manager - namespace: openshift-operator-controller - labels: - app.kubernetes.io/name: operator-controller - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: experimental -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: operator-controller-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/openshift/operator-controller/manifests.yaml b/openshift/operator-controller/manifests.yaml index 8d2be5ecf..091dfe26a 100644 --- a/openshift/operator-controller/manifests.yaml +++ b/openshift/operator-controller/manifests.yaml @@ -61,23 +61,6 @@ spec: - Ingress - Egress --- -# Source: olmv1/templates/poddisruptionbudget-olmv1-system-operator-controller.yml -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: operator-controller-controller-manager - namespace: openshift-operator-controller - labels: - app.kubernetes.io/name: operator-controller - app.kubernetes.io/part-of: olm - annotations: - olm.operatorframework.io/feature-set: standard -spec: - minAvailable: 1 - selector: - matchLabels: - control-plane: operator-controller-controller-manager ---- # Source: olmv1/templates/serviceaccount-olmv1-system-common-controller-manager.yml apiVersion: v1 kind: ServiceAccount diff --git a/requirements.txt b/requirements.txt index ba058cbbc..72686f75c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ paginate==0.5.7 pathspec==0.12.1 platformdirs==4.5.0 Pygments==2.19.2 -pymdown-extensions==10.17.2 +pymdown-extensions==10.17.1 pyquery==2.0.1 python-dateutil==2.9.0.post0 PyYAML==6.0.3 diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go index b3380ff0f..ab0bf48b1 100644 
--- a/test/e2e/cluster_extension_install_test.go +++ b/test/e2e/cluster_extension_install_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "slices" "testing" "time" @@ -198,8 +197,7 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { t.Log("When a cluster extension is installed from a catalog") clusterExtension, extensionCatalog, sa, ns := TestInit(t) - extraCatalogName := fmt.Sprintf("extra-test-catalog-%s", rand.String(8)) - extraCatalog, err := CreateTestCatalog(context.Background(), extraCatalogName, os.Getenv(testCatalogRefEnvVar)) + extraCatalog, err := CreateTestCatalog(context.Background(), "extra-test-catalog", os.Getenv(testCatalogRefEnvVar)) require.NoError(t, err) defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) @@ -240,11 +238,7 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { require.NotNil(ct, cond) require.Equal(ct, metav1.ConditionTrue, cond.Status) require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - // Catalog names are sorted alphabetically in the error message - catalogs := []string{extensionCatalog.Name, extraCatalog.Name} - slices.Sort(catalogs) - expectedMessage := fmt.Sprintf("in multiple catalogs with the same priority %v", catalogs) - require.Contains(ct, cond.Message, expectedMessage) + require.Contains(ct, cond.Message, "in multiple catalogs with the same priority [extra-test-catalog test-catalog]") }, pollDuration, pollInterval) } @@ -447,7 +441,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { // patch imageRef tag on test-catalog image with v2 image t.Log("By patching the catalog ImageRef to point to the v2 catalog") updatedCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:v2", os.Getenv("CLUSTER_REGISTRY_HOST")) - err := patchTestCatalog(context.Background(), extensionCatalog.Name, updatedCatalogImage) + err := patchTestCatalog(context.Background(), testCatalogName, updatedCatalogImage) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) @@ -480,9 +474,8 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { require.NoError(t, err) // create a test-catalog with latest image tag - catalogName := fmt.Sprintf("test-catalog-%s", rand.String(8)) latestCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:latest", os.Getenv("CLUSTER_REGISTRY_HOST")) - extensionCatalog, err := CreateTestCatalog(context.Background(), catalogName, latestCatalogImage) + extensionCatalog, err := CreateTestCatalog(context.Background(), testCatalogName, latestCatalogImage) require.NoError(t, err) clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8)) clusterExtension := &ocv1.ClusterExtension{ diff --git a/test/helpers/helpers.go b/test/helpers/helpers.go index af142c6e3..49ebeaab6 100644 --- a/test/helpers/helpers.go +++ b/test/helpers/helpers.go @@ -218,7 +218,6 @@ func TestInit(t *testing.T) (*ocv1.ClusterExtension, *ocv1.ClusterCatalog, *core func TestInitClusterExtensionClusterCatalog(t *testing.T) (*ocv1.ClusterExtension, *ocv1.ClusterCatalog) { ceName := fmt.Sprintf("clusterextension-%s", rand.String(8)) - catalogName := fmt.Sprintf("test-catalog-%s", rand.String(8)) ce := &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ @@ -226,10 +225,10 @@ func TestInitClusterExtensionClusterCatalog(t *testing.T) (*ocv1.ClusterExtensio }, } - cc, err := CreateTestCatalog(context.Background(), catalogName, 
os.Getenv(testCatalogRefEnvVar)) + cc, err := CreateTestCatalog(context.Background(), testCatalogName, os.Getenv(testCatalogRefEnvVar)) require.NoError(t, err) - ValidateCatalogUnpackWithName(t, catalogName) + ValidateCatalogUnpack(t) return ce, cc } @@ -251,18 +250,11 @@ func TestInitServiceAccountNamespace(t *testing.T, clusterExtensionName string) return sa, ns } -// ValidateCatalogUnpack validates that the test catalog with the default name has unpacked successfully. -// Deprecated: Use ValidateCatalogUnpackWithName for tests that use unique catalog names. func ValidateCatalogUnpack(t *testing.T) { - ValidateCatalogUnpackWithName(t, testCatalogName) -} - -// ValidateCatalogUnpackWithName validates that a catalog with the given name has unpacked successfully. -func ValidateCatalogUnpackWithName(t *testing.T, catalogName string) { catalog := &ocv1.ClusterCatalog{} t.Log("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: catalogName}, catalog) + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) require.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeProgressing) require.NotNil(ct, cond) @@ -273,11 +265,11 @@ func ValidateCatalogUnpackWithName(t *testing.T, catalogName string) { t.Log("Checking that catalog has the expected metadata label") require.NotNil(t, catalog.Labels) require.Contains(t, catalog.Labels, "olm.operatorframework.io/metadata.name") - require.Equal(t, catalogName, catalog.Labels["olm.operatorframework.io/metadata.name"]) + require.Equal(t, testCatalogName, catalog.Labels["olm.operatorframework.io/metadata.name"]) t.Log("Ensuring ClusterCatalog has Status.Condition of Type = Serving with status == True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: catalogName}, catalog) + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) require.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeServing) require.NotNil(ct, cond) From aa8c39f8dd862023754ff47b4091e1ff7beecc78 Mon Sep 17 00:00:00 2001 From: Forrest Babcock Date: Fri, 5 Dec 2025 14:26:56 -0500 Subject: [PATCH 4/6] Revert "Merge pull request #571 from kuiwang02/agents" This reverts commit 496965e52fbf2e391d294578f6d94a20011a3280, reversing changes made to c51b19bf149bb1291921d9eb0f96a80cd208f8d3. --- openshift/tests-extension/test/qe/AGENTS.md | 587 ------------------ openshift/tests-extension/test/qe/CLAUDE.md | 1 - openshift/tests-extension/test/qe/README.md | 129 ++-- .../tests-extension/test/qe/specs/olmv1_cc.go | 2 +- 4 files changed, 71 insertions(+), 648 deletions(-) delete mode 100644 openshift/tests-extension/test/qe/AGENTS.md delete mode 100644 openshift/tests-extension/test/qe/CLAUDE.md diff --git a/openshift/tests-extension/test/qe/AGENTS.md b/openshift/tests-extension/test/qe/AGENTS.md deleted file mode 100644 index 29e75ad63..000000000 --- a/openshift/tests-extension/test/qe/AGENTS.md +++ /dev/null @@ -1,587 +0,0 @@ -# AGENTS.md - -This file provides AI agents with comprehensive context about the OLM v1 QE Test Extension project to enable effective test development, debugging, and maintenance. 
- -## Scope and Working Directory - -### Applicability -This AGENTS.md applies to the **OLM v1 QE Test Cases** located at: -``` -operator-framework-operator-controller/openshift/tests-extension/test/qe/ -``` - -**IMPORTANT**: This file is specifically for the **QE migration test code** in the `test/qe/` directory, not for: -- Origin migration test code (in other directories under `tests-extension/test/`) -- Product code in the main `operator-controller` repository - -### Required Working Directory -For this AGENTS.md to be effective, ensure your working directory is set to: -```bash -/operator-framework-operator-controller/openshift/tests-extension/test/qe/ -``` - -### Related Directories for QE Migration - -Beyond the main `test/qe/` directory, QE migration work also involves: -- `tests-extension/cmd/` - Test binary entry point and suite definitions -- `tests-extension/Makefile` - Build automation -- `tests-extension/pkg/bindata/qe/` - Embedded test data for QE tests - -### Working Directory Verification for AI Agents - -**Context Awareness**: This AGENTS.md may be loaded even when not actively working with QE test files (e.g., user briefly entered `test/qe/` directory and left). Apply these guidelines intelligently based on the actual task. - -#### When to Apply This AGENTS.md - -**ONLY apply this AGENTS.md when the user is working with QE migration test files**, identified by: -- File paths containing `tests-extension/test/qe/` -- File paths containing `tests-extension/cmd/` (suite definitions) -- File paths containing `tests-extension/pkg/bindata/qe/` (test data) -- Tasks explicitly about "OLM v1 QE tests", "QE migration", "olmv1 qe", "test extension", "olmv1-tests-ext" - -**DO NOT apply this AGENTS.md when**: -- Working with files outside these directories (e.g., Origin migration tests, product code) -- User is in a different part of the repository -- Even if this AGENTS.md was previously loaded - -#### Directory Check (Only for QE Test File Operations) - -When the user asks to work with QE test files (files under `tests-extension/test/qe/`): - -1. **Check current working directory**: - ```bash - pwd - ``` - -2. **Verify directory alignment**: - - Preferred: Current directory should be `tests-extension/test/qe/` or subdirectory - - This ensures AGENTS.md context is automatically available - -3. **If working directory is not aligned**: - - **Inform (don't block) the user**: - ``` - 💡 Note: Working Directory Suggestion - - You're working with QE test files under tests-extension/test/qe/, - but your current directory is elsewhere. For better context and auto-completion: - - Consider running: cd openshift/tests-extension/test/qe/ - - I can still help you, but setting the working directory correctly - ensures I have full access to the test documentation. - - Do you want to continue in the current directory, or should I wait - for you to switch? - ``` - -**Important**: This is a suggestion, not a blocker. If the user wants to proceed, assist them normally. 
- -### Path Structure Reference -``` -operator-framework-operator-controller/ ← OpenShift downstream product repo -└── openshift/ - └── tests-extension/ ← Test extension root - ├── cmd/main.go ← Test binary entry point and suite definitions - ├── Makefile ← Build automation - ├── test/ - │ ├── qe/ ← THIS AGENTS.MD APPLIES HERE - │ │ ├── AGENTS.md ← This file - │ │ ├── CLAUDE.md ← Pointer for Claude Code - │ │ ├── README.md ← Project documentation - │ │ ├── specs/ ← QE migration test specifications - │ │ │ ├── olmv1_ce.go ← ClusterExtension tests - │ │ │ └── olmv1_cc.go ← ClusterCatalog tests - │ │ └── util/ ← Test utilities - │ │ ├── olmv1util/ ← OLM v1 specific utilities - │ │ ├── filters/ ← Test filters - │ │ └── ... ← Other utilities - │ └── ... ← Origin migration tests (NOT QE) - ├── pkg/ - │ └── bindata/ - │ └── qe/ ← Embedded test data for QE tests - └── bin/olmv1-tests-ext ← Compiled test binary -``` - -## Project Overview - -This is a **Quality Engineering (QE) test extension** for OLM v1 (Operator Lifecycle Manager v1) on OpenShift. It provides end-to-end functional tests that validate OLM v1 features and functionality in real OpenShift clusters. - -### Purpose -- Validate OLM v1 ClusterExtension and ClusterCatalog functionality across different OpenShift topologies -- Test operator installation, upgrade, and lifecycle management scenarios using OLM v1 APIs -- Ensure OLM v1 works correctly in various cluster configurations (SNO, standard OCP, etc.) -- Provide regression testing for OLM v1 bug fixes and enhancements - -**Note**: OLM v1 currently does NOT support HyperShift and Microshift topologies. Support may be added in future releases. - -### Key Characteristics -- **Framework**: Built on Ginkgo v2 BDD testing framework and OpenShift Tests Extension (OTE) -- **Test Organization**: Polarion-ID based test case management -- **Integration**: Extends `openshift-tests-extension` framework -- **API Focus**: Tests OLM v1 APIs (ClusterExtension, ClusterCatalog) rather than OLM v0 APIs - -## Test Case Sources and Organization - -### Two Types of Test Cases - -#### 1. Migrated Cases from Origin -- **Characteristics**: All robust and stable, meeting OpenShift CI requirements -- **Contribution**: ALL contributed to openshift-tests and used in operator-controller PR presubmit jobs -- **Location**: Should NOT be implemented under `tests-extension/test/qe/specs/` -- **Note**: These cases are outside the scope of this AGENTS.md - -#### 2. 
Migrated Cases from tests-private (QE Migration) -- **Characteristics**: Some stable, others not -- **Contribution**: Only those meeting OpenShift CI requirements can be contributed to openshift-tests -- **Location**: MUST be implemented under `tests-extension/test/qe/specs/` -- **Auto-Labeling**: Framework automatically adds `Extended` label to these cases -- **Quality Gate**: Cases not meeting CI requirements run in QE-created periodic jobs -- **Note**: This AGENTS.md focuses on these QE migration cases - -### Suite Selection Logic - -**For OpenShift General Jobs and PR Presubmit Jobs**: -- Select all cases by default, then exclude unwanted ones -- Migrated cases from Origin: All fit this logic -- Migrated cases from tests-private: Not all fit by default (hence the `Extended` label mechanism) - - **IMPORTANT**: Only cases with **`Extended` AND `ReleaseGate`** labels can be used in OpenShift General Jobs and PR Presubmit Jobs - - Cases with only `Extended` (no `ReleaseGate`) can only be used in OLM QE-defined periodic jobs - -**Reference**: For OpenShift CI requirements, see [Choosing a Test Suite](https://docs.google.com/document/d/1cFZj9QdzW8hbHc3H0Nce-2xrJMtpDJrwAse9H7hLiWk/edit?tab=t.0#heading=h.tjtqedd47nnu) - -## Test Suite Definitions - -**IMPORTANT**: Suite definitions are sourced from **[cmd/main.go](../cmd/main.go)** and may change over time. Always refer to main.go for the most current definitions. - -For detailed explanations and code examples, see **[README.md](./README.md)** section "Suite Definitions". - -**Quick overview for AI agents**: - -### Suites for OpenShift General Jobs and PR Presubmit Jobs -- **Suite names**: `olmv1/parallel`, `olmv1/serial`, `olmv1/slow`, `olmv1/all` -- **Selection logic**: Non-Extended OR (Extended with ReleaseGate) -- **Defined in**: [cmd/main.go](../cmd/main.go) lines 51-101 - -### Suites for Custom Prow Jobs (OLM QE Periodic) -``` -olmv1/extended # All Extended tests -├── olmv1/extended/releasegate # Extended + ReleaseGate -└── olmv1/extended/candidate # Extended without ReleaseGate - ├── function # Functional tests (excludes StressTest) - │ ├── parallel # Can run concurrently - │ ├── serial # Must run one at a time - │ ├── fast # Non-slow (parallel + serial) - │ └── slow # [Slow] tests - └── stress # StressTest label -``` - -**Key relationships**: `candidate = function + stress`, `function = parallel + serial + slow = fast + slow` - -**Defined in**: [cmd/main.go](../cmd/main.go) lines 103-209, using helper functions from `test/qe/util/filters/filters.go` - -## Directory Structure - -``` -openshift/tests-extension/ -├── cmd/ -│ └── main.go # Test binary entry point and suite definitions -│ -├── test/ -│ └── qe/ # QE migration test code (THIS AGENTS.MD SCOPE) -│ ├── AGENTS.md # This file -│ ├── CLAUDE.md # Claude Code pointer -│ ├── README.md # Project documentation -│ │ -│ ├── specs/ # Test specifications (*.go) -│ │ ├── olmv1_ce.go # ClusterExtension tests -│ │ └── olmv1_cc.go # ClusterCatalog tests -│ │ └── ... 
# (more test files will be added over time) -│ │ -│ └── util/ # Test utilities and helpers -│ ├── client.go # OpenShift client wrappers -│ ├── framework.go # Test framework setup -│ ├── tools.go # Common test tools -│ ├── clusters.go # Cluster detection utilities -│ ├── extensiontest.go # Extension test helpers -│ ├── template.go # Template processing -│ ├── architecture/ # Architecture detection -│ ├── container/ # Container client (Podman/Quay) -│ ├── filters/ # Test filters -│ │ └── filters.go # Suite filter definitions -│ └── olmv1util/ # OLM v1 specific utilities -│ ├── catalog.go # ClusterCatalog helpers -│ ├── helper.go # General helpers -│ ├── icsp.go # ImageContentSourcePolicy utilities -│ ├── networkpolicy.go # NetworkPolicy utilities -│ ├── gen_rbac.go # RBAC generation helpers -│ └── ... # (more utilities as needed) -│ -├── pkg/ -│ └── bindata/ # Embedded test data -│ └── qe/ # QE test bindata -│ -├── bin/ # Compiled binaries -│ └── olmv1-tests-ext # Compiled test binary -│ -└── Makefile # Build and test automation -``` - -## Test Case Migration Guide - -For complete migration guidelines including code changes and label requirements, refer to **[README.md](./README.md)** section "Test Case Migration Guide". - -**Quick reference for AI agents**: - -### Code Changes Summary -- `exutil.By()` → `g.By()` -- `newCheck().check()` → `olmv1util.NewCheck().Check()` -- Add `exutil.` and `olmv1util.` package prefixes -- Testdata: use `"olm"` (not `"olm", "v1"`) - -### Essential Labels -- `[sig-olmv1]`, `[Jira:OLM]`, `PolarionID:xxxxx` - Required in title -- `g.Label("ReleaseGate")` - For cases meeting OpenShift CI requirements (don't add to `[Disruptive]`, `[Slow]`, or `StressTest` cases) -- `[Skipped:Disconnected]`, `[Skipped:Connected]`, `[Skipped:Proxy]` - Network requirements -- `[Serial]`, `[Slow]`, `[Disruptive]` - Execution characteristics - -**Note**: OLM v1 currently does NOT support Microshift and HyperShift - -## Test Architecture and Patterns - -### Test Structure Pattern - -For complete test structure examples, refer to existing test files: -- **Standard tests**: `specs/olmv1_ce.go`, `specs/olmv1_cc.go` -- **Key patterns**: Look for `g.Describe`, `g.BeforeEach`, `g.AfterEach`, `g.It` blocks - -**Basic structure**: -```go -var _ = g.Describe("[sig-olmv1][Jira:OLM] feature description", func() { - defer g.GinkgoRecover() - var oc = exutil.NewCLIWithoutNamespace("default") - - g.BeforeEach(func() { - // Setup resources, skip conditions - exutil.SkipMicroshift(oc) - // if the user want to create project, use oc.SetupProject() here. - exutil.SkipNoOLMv1Core(oc) - }) - - g.AfterEach(func() { - // Cleanup resources (use defer) - }) - - g.It("PolarionID:xxxxx-test description", g.Label("ReleaseGate"), func() { - // Test implementation - }) -}) -``` - -**Topology-specific patterns**: -- **Microshift** (FUTURE USE - not yet supported): `exutil.SkipMicroshift(oc)` -- **HyperShift** (FUTURE USE - not yet supported): `if !exutil.IsHypershiftMgmtCluster(oc) { g.Skip(...) }` - -### Skip Functions and Cluster Detection - -**Note**: OLM v1 currently does NOT support Microshift and HyperShift. The related functions below are for future use when support is added. 
- -```go -// For standard tests (skip ON Microshift) - NOT CURRENTLY NEEDED for OLM v1: -exutil.SkipMicroshift(oc) // Skip this test on Microshift clusters - -// For Microshift-specific tests (skip if NOT Microshift) - FUTURE USE: -if !exutil.IsMicroshiftCluster(oc) { - g.Skip("it is not microshift, so skip it.") -} - -// For HyperShift management cluster tests (skip if NOT HyperShift mgmt) - FUTURE USE: -if !exutil.IsHypershiftMgmtCluster(oc) { - g.Skip("this is not a hypershift management cluster, skip test run") -} - -// HyperShift-related setup (FUTURE USE when OLM v1 supports HyperShift): -exutil.EnsureHypershiftBinary(oc) // Ensure HyperShift binary is available -exutil.ValidHypershiftAndGetGuestKubeConf(oc) // Get guest cluster kubeconfig -oc.SetGuestKubeconf(kubeconfig) // Set guest cluster kubeconfig for test -oc.AsGuestKubeconf() // Use guest cluster context for operations - -// AKS cluster detection: -isAKS, err := exutil.IsAKSCluster(context.TODO(), oc) - -// Other skip functions: -exutil.SkipForSNOCluster(oc) // Skip on Single Node OpenShift -exutil.IsFeaturegateEnabled(oc, "FeatureName") // Check feature gate status -``` - -## Local Development Workflow - -For complete local development workflow, build instructions, testing procedures, PR submission requirements, and disconnected environment support, refer to **[README.md](./README.md)** section "Local Development Workflow". - -**Quick reference**: -- Build: `make bindata && make build && make update-metadata` -- Find test: `./bin/olmv1-tests-ext list -o names | grep ` -- Run test: `./bin/olmv1-tests-ext run-test ""` -- openshift-tests integration: See README.md for environment variables and suite selection -- PR requirements: See README.md for stability testing (`/payload-aggregate`) requirements - -**Important for Disconnected Tests**: With IDMS/ITMS in place, tests work the same in both connected and disconnected environments. See README.md for `ValidateAccessEnvironment` usage - -## Test Automation Code Requirements - -For complete code quality guidelines, best practices, logging best practices, and security considerations, refer to **[README.md](./README.md)** section "Test Automation Code Requirements". 
- -**Critical rules for AI agents**: -- ✅ Use `defer` for cleanup (BEFORE resource creation): `defer resource.Delete(oc)` then `resource.Create(oc)` -- ✅ Use case ID for resource naming (NOT random strings): `name := "test-extension-" + caseID` -- ❌ Don't use `o.Expect` inside `wait.Poll` loops (use `if err != nil { return false, err }`) -- ❌ Don't execute logic in `g.Describe` blocks (only initialization, move logic to `g.BeforeEach`) -- ❌ Don't use quotes in test titles (breaks XML parsing) -- ❌ Don't put large log outputs in error messages (use proper log messages instead of `o.Expect` with large output) - -## Key Utilities - -For complete utility APIs and usage examples, refer to the source code and existing tests: - -### `exutil` Package -**Location**: `util/` directory (e.g., `util/client.go`, `util/framework.go`, `util/tools.go`, `util/clusters.go`) - -**Key functions**: -- CLI management: `NewCLI()`, `KubeConfigPath()` -- Resource operations: `OcAction()`, `OcCreate()`, `OcDelete()`, `PatchResource()` -- Cluster detection: `IsSNOCluster()`, `IsROSA()`, `IsTechPreviewNoUpgrade()`, `IsFeaturegateEnabled()` -- Skip functions: `SkipMicroshift()` (FUTURE USE), `SkipForSNOCluster()` - -### `olmv1util` Package -**Location**: `util/olmv1util/` directory (e.g., `util/olmv1util/catalog.go`, `util/olmv1util/helper.go`) - -**Key types and methods**: -- `ClusterCatalogDescription`: Create, Delete, WaitCatalogStatus -- `ClusterExtensionDescription`: Create, Delete, WaitClusterExtensionCondition -- `NewCheck()`: Validation helper for ClusterExtension/ClusterCatalog state - -**Usage examples**: See existing test files in `specs/olmv1_ce.go` and `specs/olmv1_cc.go` - -## Anti-Patterns to Avoid - -For complete anti-patterns with detailed code examples and explanations, refer to **[README.md](./README.md)** section "Test Automation Code Requirements". - -**Common mistakes for AI agents to avoid**: -- ❌ No cleanup: Always use `defer resource.Delete(oc)` BEFORE `resource.Create(oc)` -- ❌ Hardcoded names: Use case ID for naming: `name := "test-extension-" + caseID` -- ❌ Missing timeouts: Always specify timeout for Wait functions -- ❌ Hard sleeps: Use Wait functions instead of `time.Sleep()` -- ❌ `o.Expect` in `wait.Poll`: Use `if err != nil { return false, err }` pattern instead - -**See existing test patterns**: `specs/olmv1_ce.go` and `specs/olmv1_cc.go` - -## Quick Reference - -For complete workflow including openshift-tests integration and PR requirements, see **[README.md](./README.md)** section "Local Development Workflow". 
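
The cleanup, naming, and polling rules above are easiest to see together in one place. The following is a minimal, illustrative sketch only: the Ginkgo, `exutil`, and `wait` calls mirror patterns used elsewhere in this repository, but the `ClusterCatalogDescription` field name, its `Create`/`Delete` signatures, and the Polarion ID are assumptions made for illustration, not the actual `olmv1util` API.

```go
// Illustrative sketch only -- helper field names and method signatures are assumptions.
package specs

import (
	"context"
	"time"

	g "github.com/onsi/ginkgo/v2"
	"k8s.io/apimachinery/pkg/util/wait"

	exutil "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/test/qe/util"
	olmv1util "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/test/qe/util/olmv1util"
)

var _ = g.Describe("[sig-olmv1][Jira:OLM] example patterns", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLIWithoutNamespace("default")

	g.It("PolarionID:00000-example of cleanup and polling patterns", func() {
		// Derive resource names from the Polarion case ID, not random strings.
		caseID := "00000"
		catalog := olmv1util.ClusterCatalogDescription{Name: "clustercatalog-" + caseID} // field name is an assumption

		// Register cleanup BEFORE creating the resource, so it runs even if Create partially succeeds.
		defer catalog.Delete(oc)
		catalog.Create(oc)

		// Inside the poll func, return (bool, error) instead of calling o.Expect, which would panic here.
		errWait := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 3*time.Minute, false,
			func(ctx context.Context) (bool, error) {
				status, err := oc.AsAdmin().WithoutNamespace().Run("get").
					Args("clustercatalog", catalog.Name,
						`-o=jsonpath={.status.conditions[?(@.type=="Serving")].status}`).Output()
				if err != nil {
					return false, nil // treat transient errors as "not ready yet" and keep polling
				}
				return status == "True", nil
			})
		// Assert outside the poll loop.
		exutil.AssertWaitPollNoErr(errWait, "clustercatalog "+catalog.Name+" never reached Serving")
	})
})
```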
- -### Build and Run -```bash -make bindata && make verify && make build && make update-metadata # Full build - -./bin/olmv1-tests-ext list -o names | grep "keyword" # Find test -./bin/olmv1-tests-ext run-test "" # Run test -``` - -### Test Naming Convention -``` -[sig-olmv1][Jira:OLM] OLMv1 PolarionID:XXXXX-[Skipped:XXX]description[Serial|Slow|Disruptive] -``` - -### Key Labels (See README.md for complete list) -- `ReleaseGate` - Promotes Extended case to openshift-tests (don't add to `[Disruptive]`, `[Slow]`, or `StressTest`) -- `Extended` - Auto-added to cases under test/qe/specs/ -- `StressTest` - Stress testing -- `NonHyperShiftHOST` - Skip on HyperShift hosted clusters (FUTURE USE) - -### Key OLM v1 Resources -- **ClusterCatalog**: Cluster-scoped catalog of operator bundles -- **ClusterExtension**: Cluster-scoped operator installation and management - -### Key OLM v1 Namespaces (OpenShift) -- **openshift-operator-controller**: Operator controller components -- **openshift-catalogd**: Catalogd components - -## Resources - -- [OLM v1 OpenShift Product Code](https://github.com/openshift/operator-framework-operator-controller) -- [Ginkgo v2 Documentation](https://onsi.github.io/ginkgo/) -- [OpenShift Tests Extension](https://github.com/openshift-eng/openshift-tests-extension) -- [Test Extensions in Origin](https://github.com/openshift/origin/blob/main/docs/test_extensions.md) -- [OpenShift CI Requirements](https://docs.google.com/document/d/1cFZj9QdzW8hbHc3H0Nce-2xrJMtpDJrwAse9H7hLiWk/edit?tab=t.0#heading=h.tjtqedd47nnu) - -## Debugging - -**Investigation Priority** when tests fail: -1. **First**: Check test code in `tests-extension/test/qe/` -2. **Second**: Check test utilities in `tests-extension/test/qe/util/olmv1util/` -3. **Third**: Check resource status and conditions via `oc describe` -4. **Fourth**: Check operator-controller and catalogd logs -5. **Last**: Refer to product code to understand expected behavior - -**For deeper investigation** (when you need to refer to product code): -1. **Locate product code**: See **Product Code References** section below -2. **Trace code flow**: Use product code to understand expected behavior -3. **Compare implementation**: Check if test expectations match product implementation -4. **Check recent changes**: Look for recent commits that might have changed behavior - -**Key Namespaces** (OpenShift): -- `openshift-operator-controller`: operator-controller components -- `openshift-catalogd`: catalogd components - -**Common Debugging Commands**: -```bash -# Check resource status -oc get clusterextension -oc get clustercatalog -oc describe clusterextension -oc describe clustercatalog - -# Check logs -oc logs -n openshift-operator-controller deployment/operator-controller-controller-manager -f -oc logs -n openshift-catalogd deployment/catalogd-controller-manager -f -``` - -## Notes for AI Agents - -### Suggesting Test Locations - -When discussing whether a feature needs testing: - -**✅ DO**: Provide simple, focused guidance on QE test placement -- Example: "If you need to write QE tests for this functionality, they should go in `tests-extension/test/qe/specs/`." 
-- Keep suggestions within the scope of this AGENTS.md (QE tests only) - -**❌ DON'T**: -- Discuss DEV test locations (e.g., unit tests in product code directories) -- Explain the difference between QE and DEV tests unless explicitly asked -- Provide detailed test categorization unless the user is actively writing tests - -**Remember**: This AGENTS.md is for QE test code in `tests-extension/test/qe/` only. Product code testing (DEV tests) is outside this scope. - -### Critical Points - -1. **Test Scope**: - - This AGENTS.md applies ONLY to QE migration test code under `test/qe/` - - Origin migration tests (outside `test/qe/`) have different patterns and are NOT covered here - -2. **Suite Definitions Source**: - - Always check `cmd/main.go` for current suite definitions - - Suite qualifiers may change over time - -3. **Extended Label Mechanism**: - - Tests under `test/qe/specs/` automatically get `Extended` label - - Only `Extended + ReleaseGate` cases can be used in OpenShift General Jobs - - Extended cases without `ReleaseGate` run only in QE periodic jobs - -4. **ReleaseGate is Critical**: - - Determines if Extended case can be used in OpenShift General Jobs and PR Presubmit Jobs - - All cases are executed via `openshift-tests` command - -5. **Most Failures are Test Code Issues**: - - Always investigate test code first before looking at product code - - Refer to Debugging section for investigation priority - -### Test Development Guidelines - -1. **Component Tag**: Always use `[sig-olmv1]` (not `[sig-operator]`) -2. **Utilities**: Use `olmv1util` package (not `olmv0util`) -3. **API Focus**: Test OLM v1 APIs (ClusterExtension, ClusterCatalog) not OLM v0 APIs -4. **Cleanup**: Always use defer for cleanup to ensure resources are removed -5. **Resource Naming**: Use Polarion case ID for naming resources (NOT random strings) - - Extract case ID from test title: `PolarionID:12345` → `caseID := "12345"` - - Apply to all resources: `namespace := "test-ns-" + caseID` - - Benefits: Easier debugging, consistent naming, traceable to test cases -6. **Suite Logic**: Understand the qualifier logic for different test suites - - Refer to Test Suite Definitions section for suite hierarchy - - Understand which suite your test belongs to based on labels -7. **Feature Gates**: For tests depending on feature gates, see **[README.md](./README.md)** section "Label Requirements" for detailed handling patterns: - - Case 1: Test only runs when feature gate is enabled → Add `[OCPFeatureGate:xxxx]` in title - - Case 2: Test runs with/without gate but different behaviors → Use `IsFeaturegateEnabled` check (no label) - - Case 3: Test runs same way regardless of gate → No label, no check - -### Cluster Topologies - -**Note**: OLM v1 currently supports only a subset of OpenShift topologies. Support for additional topologies may be added in future releases. 
- -**Currently Supported**: -- **Standard OCP**: Regular OpenShift clusters -- **SNO (Single Node OpenShift)**: Single-node clusters - -**NOT Currently Supported** (for future releases): -- **Microshift**: Lightweight OpenShift for edge (not yet supported by OLM v1) -- **HyperShift Hosted**: Hosted control plane clusters (not yet supported by OLM v1) -- **HyperShift Management**: Management clusters for hosted control planes (not yet supported by OLM v1) - -**Network Connectivity**: -- **Connected**: Full internet access -- **Disconnected**: No internet access (air-gapped) -- **Proxy**: Internet access through proxy - -Use skip labels in test titles for topology-specific tests: -- `[Skipped:Disconnected]`: Test requires internet access -- `[Skipped:Connected]`: Test requires disconnected environment -- `[Skipped:Proxy]`: Test incompatible with proxy - -### Common Pitfalls - -**Test Code Issues**: -1. ❌ **Don't** use `o.Expect` inside `wait.Poll` loops (causes panic) -2. ❌ **Don't** use quotes in test titles (breaks XML parsing) -3. ❌ **Don't** execute logic in `g.Describe` blocks (only initialization) -4. ❌ **Don't** forget to add `Extended` label (but it's automatic for `test/qe/specs/`) -5. ❌ **Don't** add `ReleaseGate` to `[Disruptive]`, `[Slow]`, or `StressTest` cases -6. ❌ **Don't** forget cleanup in `g.AfterEach` with defer -7. ❌ **Don't** assume namespace exists - create with unique name -8. ❌ **Don't** hardcode resource names - use case ID for naming - -**OLM v1 Specific Issues**: -9. ❌ **Don't** assume multi-tenancy - OLM v1 explicitly does NOT support it -10. ❌ **Don't** forget to wait for ClusterCatalog `Serving` status before creating ClusterExtension -11. ❌ **Don't** assume catalog is immediately available - allow time for unpacking -12. ❌ **Don't** ignore resolution errors - check ClusterExtension status conditions - -### Best Practices - -**General Test Practices**: -1. ✅ **Do** check suite definitions in `cmd/main.go` before adding tests -2. ✅ **Do** use case ID for naming resources (NOT random strings) -3. ✅ **Do** add proper PolarionID to all test cases -4. ✅ **Do** use skip functions for topology-specific tests -5. ✅ **Do** register defer cleanup BEFORE creating resources - - Pattern: `defer resource.Delete(oc)` then `resource.Create(oc)` - - Why: Ensures cleanup even if Create partially succeeds then fails -6. ✅ **Do** test locally with `olmv1-tests-ext` before submitting PR -7. ✅ **Do** test with `openshift-tests` to verify suite selection -8. ✅ **Do** run stability tests (`/payload-aggregate`) for ReleaseGate cases -9. ✅ **Do** update metadata after test name changes - -**OLM v1 Specific Practices**: -9. ✅ **Do** wait for ClusterCatalog to reach `Serving` status -10. ✅ **Do** check ClusterExtension status conditions for debugging -11. ✅ **Do** verify bundle resources are created in target namespace -12. ✅ **Do** use meaningful names that trace back to test case ID -13. ✅ **Do** clean up ClusterExtension and ClusterCatalog in defer blocks -14. ✅ **Do** understand the data flow: Catalog → Resolution → Installation - -### Build and Run - -For complete workflow and detailed commands, refer to **[README.md](./README.md)** section "Local Development Workflow" and the **[Quick Reference](#quick-reference)** section above. - -**Essential pattern for AI agents**: -1. Build: `make bindata && make build && make update-metadata` -2. Find test: `./bin/olmv1-tests-ext list -o names | grep ` -3. Run locally: `./bin/olmv1-tests-ext run-test ""` -4. 
Test with openshift-tests: See README.md for environment variables and suite selection -5. Run stability tests: `/payload-aggregate` for ReleaseGate cases (see README.md for details) - -### Working Directory Context - -**Remember**: This AGENTS.md is specifically for QE migration tests under `test/qe/`. If the user is working with: -- Origin migration tests (outside `test/qe/`) → This AGENTS.md does NOT apply -- Product code → This AGENTS.md does NOT apply -- Build infrastructure → This AGENTS.md may partially apply (for suite definitions in `cmd/main.go`) diff --git a/openshift/tests-extension/test/qe/CLAUDE.md b/openshift/tests-extension/test/qe/CLAUDE.md deleted file mode 100644 index 43c994c2d..000000000 --- a/openshift/tests-extension/test/qe/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -@AGENTS.md diff --git a/openshift/tests-extension/test/qe/README.md b/openshift/tests-extension/test/qe/README.md index cea6ff772..3870b79e8 100644 --- a/openshift/tests-extension/test/qe/README.md +++ b/openshift/tests-extension/test/qe/README.md @@ -1,18 +1,3 @@ -# OLM v1 QE Test Extension - -> **For AI Agents**: This directory contains comprehensive documentation for AI coding assistants. -> Please read [AGENTS.md](./AGENTS.md) for detailed context about the OLM v1 QE test framework, -> migration guidelines, suite definitions, and best practices. -> -> **Using Claude Code**: If you are using Claude Code as your AI coding assistant: -> 1. Start Claude Code from `test/qe/` directory or its subdirectories (e.g., `test/qe/specs/`) -> 2. On first launch from a subdirectory, Claude Code will prompt you to load the parent AGENTS.md - select **Yes** (subsequent launches will auto-load) -> 3. If starting from `test/qe/` itself, AGENTS.md is automatically loaded -> 4. Use `/memory` to verify AGENTS.md is loaded and view its content -> -> This ensures Claude Code has access to test framework architecture, migration guidelines, -> suite definitions, and code quality standards. - ## Overview When creating test cases based on OTE (OpenShift Tests Extension) in operator-controller, there are two sources: @@ -110,38 +95,72 @@ We need to identify all cases from tests-private among all cases, then mark whic ### Suites for Custom Prow jobs: -For the complete and current suite definitions with detailed qualifiers and comments, refer to **[cmd/main.go](../cmd/main.go)** (lines 103-209). 
+#### Extended All Suite +```go + ext.AddSuite(e.Suite{ + Name: "olmv1/extended", + Qualifiers: []string{ + `labels.exists(l, l=="Extended")`, + }, + }) +``` -**Suite hierarchy**: +#### Extended ReleaseGate Suite +```go + ext.AddSuite(e.Suite{ + Name: "olmv1/extended/releasegate", + Qualifiers: []string{ + `labels.exists(l, l=="Extended") && labels.exists(l, l=="ReleaseGate")`, + }, + }) ``` -olmv1/extended # All extended tests -├── olmv1/extended/releasegate # Extended + ReleaseGate -└── olmv1/extended/candidate # Extended without ReleaseGate - ├── function # Functional tests (excludes StressTest) - │ ├── parallel # Can run concurrently (excludes [Serial] and [Slow]) - │ ├── serial # Must run one at a time ([Serial] but not [Slow]) - │ ├── fast # Non-slow tests (parallel + serial) - │ └── slow # [Slow] tests - └── stress # StressTest label + +#### Extended Candidate Suite +```go + ext.AddSuite(e.Suite{ + Name: "olmv1/extended/candidate", + Qualifiers: []string{ + `labels.exists(l, l=="Extended") && !labels.exists(l, l=="ReleaseGate")`, + }, + }) ``` -**Key relationships** (defined in main.go): -- `candidate = function + stress` -- `function = parallel + serial + slow = fast + slow` +#### Extended Candidate Parallel Suite +```go + ext.AddSuite(e.Suite{ + Name: "olmv1/extended/candidate/parallel", + Qualifiers: []string{ + `(labels.exists(l, l=="Extended") && !labels.exists(l, l=="ReleaseGate") && !labels.exists(l, l=="StressTest")) && + !(name.contains("[Serial]") || name.contains("[Slow]"))`, + }, + }) +``` -**Note**: All suite qualifiers use helper functions from `test/qe/util/filters/filters.go`: -- `BasedExtendedTests()` - All Extended tests -- `BasedExtendedReleaseGateTests()` - Extended AND ReleaseGate -- `BasedExtendedCandidateTests()` - Extended AND NOT ReleaseGate -- `BasedExtendedCandidateFuncTests()` - Extended AND NOT ReleaseGate AND NOT StressTest +#### CExtended Candidate Serial Suite +```go + ext.AddSuite(e.Suite{ + Name: "olmv1/extended/candidate/serial", + Qualifiers: []string{ + `(labels.exists(l, l=="Extended") && !labels.exists(l, l=="ReleaseGate") && !labels.exists(l, l=="StressTest")) && + (name.contains("[Serial]") && !name.contains("[Slow]"))`, + }, + }) +``` + +#### Extended Candidate Slow Suite +```go + ext.AddSuite(e.Suite{ + Name: "olmv1/extended/candidate/slow", + Qualifiers: []string{ + `(labels.exists(l, l=="Extended") && !labels.exists(l, l=="ReleaseGate") && !labels.exists(l, l=="StressTest")) && + name.contains("[Slow]")`, + }, + }) +``` ## Test Case Migration Guide -**Required For all QE cases**: -- Do not use `&|!,()/` in case title -- Do NOT remove the PolarionID number from the `original-name` label. The PolarionID in `g.Label("original-name:...")` must include the case ID number. - - ✅ **Correct**: `g.Label("original-name:[sig-operator][Jira:OLM] OLMv0 optional should PolarionID:68679-[Skipped:Disconnected]catalogsource with invalid name is created")` - - ❌ **Wrong**: `g.Label("original-name:[sig-operator][Jira:OLM] OLMv0 optional should PolarionID:[Skipped:Disconnected]catalogsource with invalid name is created")` (missing case ID) +**Required For all QE cases**: Do not use `&|!,()/` in case title ### A. Code Changes for Migrated Cases @@ -164,28 +183,20 @@ All migrated test case code needs the following changes to run in the new test f - **Note**: Don't add `ReleaseGate` if case title contains `Disruptive` or `Slow`, or labels contain `StressTest` 4. 
**Required For Migrated case from test-private**: Add `[OTP]` in case title -#### Optional Labels in Migration/New test cases' title -1. **LEVEL0**: Add `[Level0]` in the case title as a title tag. Do NOT use `g.Label("LEVEL0")`. - - ✅ **Correct**: `g.It("PolarionID:72192-[Level0][OTP]description", func() { ... })` - - ❌ **Wrong**: `g.It("PolarionID:72192-[OTP]description", g.Label("LEVEL0"), func() { ... })` -2. **Author**: Deprecated, remove it. +#### Optional Label for Migration and New +1. **LEVEL0**: Use title label `[Level0]` +2. **Author**: Deprecated 3. **ConnectedOnly**: Add `[Skipped:Disconnected]` in title 4. **DisconnectedOnly**: Add `[Skipped:Connected][Skipped:Proxy]` in title -5. **Case ID**: change it to `PolarionID:xxxxxx` format, and remove the old one from the case title. Such as `-72017-` strings. - - **IMPORTANT**: The PolarionID number should only appear ONCE in the test title - at the beginning as `PolarionID:xxxxx`. Do NOT repeat the number anywhere else in the title. - - **IMPORTANT**: Do NOT add `-` between two consecutive square brackets. Adjacent tags should be written directly together. - - ✅ **Correct**: `PolarionID:12345-[OTP][Skipped:Disconnected]catalog pods do not recover from node failure [Disruptive][Serial]` - - ❌ **Wrong**: `PolarionID:12345-[OTP]-[Skipped:Disconnected]catalog pods do not recover from node failure [Disruptive][Serial]` (dash between brackets) - - ❌ **Wrong**: `PolarionID:12345-[OTP][Skipped:Disconnected]12345-catalog pods do not recover from node failure [Disruptive][Serial]` (repeated ID) - - ❌ **Wrong**: `PolarionID:12345-[OTP][Skipped:Disconnected]12345-support grpc sourcetype [Serial]` (repeated ID) -6. **Importance**: Deprecated, remove it. Such as `Critical`, `High`, `Medium` and `Low` strings. -7. **NonPrerelease**: Deprecated, remove it. - - **Longduration**: Change it to `[Slow]` in case title. - - **ChkUpg**: Deprecated, remove it. Not supported (openshift-tests upgrade differs from OpenShift QE) -8. **VMonly**: Deprecated, and don't migrate the `VMonly` test cases to here. -9. **Slow, Serial, Disruptive**: Preserved, but add them in the end of the title. Such as `"[sig-operator][Jira:OLM] OLMv0 optional should PolarionID: xxx ...[Slow][Serial][Disruptive]"` -10. **DEPRECATED**: Deprecated, don't add this kind of case to here. But, if your test case has been merged into this repo and you want to deprecate it, please add this case into the [IgnoreObsoleteTests](https://github.com/openshift/operator-framework-operator-controller/blob/main/openshift/tests-extension/cmd/main.go). -11. **CPaasrunOnly, CPaasrunBoth, StagerunOnly, StagerunBoth, ProdrunOnly, ProdrunBoth**: Deprecated, remove them. +5. **Case ID**: change to `PolarionID:xxxxxx` +6. **Importance**: Deprecated +7. **NonPrerelease**: Deprecated + - **Longduration**: Change to `[Slow]` in case title + - **ChkUpg**: Not supported (openshift-tests upgrade differs from OpenShift QE) +8. **VMonly**: Deprecated +9. **Slow, Serial, Disruptive**: Preserved +10. **DEPRECATED**: Deprecated, corresponding cases deprecated. Use `IgnoreObsoleteTests` for deprecation after addition +11. **CPaasrunOnly, CPaasrunBoth, StagerunOnly, StagerunBoth, ProdrunOnly, ProdrunBoth**: Deprecated 12. **StressTest**: Use Ginkgo label `g.Label("StressTest")` 13. **NonHyperShiftHOST**: Use Ginkgo label `g.Label("NonHyperShiftHOST")` or use `IsHypershiftHostedCluster` judgment, then skip 14. **HyperShiftMGMT**: Deprecated. 
For cases needing hypershift mgmt execution, use `g.Label("NonHyperShiftHOST")` and `ValidHypershiftAndGetGuestKubeConf` validation (to be provided when OLMv1 supports hypershift) diff --git a/openshift/tests-extension/test/qe/specs/olmv1_cc.go b/openshift/tests-extension/test/qe/specs/olmv1_cc.go index d9031d2e0..3073e70d2 100644 --- a/openshift/tests-extension/test/qe/specs/olmv1_cc.go +++ b/openshift/tests-extension/test/qe/specs/olmv1_cc.go @@ -22,7 +22,7 @@ import ( var _ = g.Describe("[sig-olmv1][Jira:OLM] clustercatalog", g.Label("NonHyperShiftHOST", "ClusterCatalog"), func() { defer g.GinkgoRecover() var ( - oc = exutil.NewCLIWithoutNamespace("default") + oc = exutil.NewCLI("olmv1-opeco"+exutil.GetRandomString(), exutil.KubeConfigPath()) ) g.BeforeEach(func() { From 42b29e2c00dd5cd77a7dd52d92776a38d761d3a0 Mon Sep 17 00:00:00 2001 From: Forrest Babcock Date: Fri, 5 Dec 2025 14:28:10 -0500 Subject: [PATCH 5/6] Revert "Merge pull request #569 from Xia-Zhao-rh/olmv1-ote-1" This reverts commit c51b19bf149bb1291921d9eb0f96a80cd208f8d3, reversing changes made to 6f593c0498793d01079cffc4423291d4c1103168. --- .../openshift_payload_olmv1.json | 195 --- .../pkg/bindata/operator/operator.go | 35 +- .../tests-extension/pkg/bindata/qe/bindata.go | 193 +-- .../tests-extension/test/qe/specs/olmv1.go | 112 -- .../tests-extension/test/qe/specs/olmv1_ce.go | 1432 ----------------- ...erextension-withoutChannel-OwnSingle.yaml} | 16 +- ...electorlabel-withoutChannel-OwnSingle.yaml | 45 - 7 files changed, 74 insertions(+), 1954 deletions(-) rename openshift/tests-extension/test/qe/testdata/olm/{clusterextension-withselectorlabel-WithoutVersion.yaml => clusterextension-withoutChannel-OwnSingle.yaml} (71%) delete mode 100644 openshift/tests-extension/test/qe/testdata/olm/clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml diff --git a/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json b/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json index c28c3f929..f587662af 100644 --- a/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json +++ b/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json @@ -16,36 +16,6 @@ "exclude": "topology==\"External\"" } }, - { - "name": "[sig-olmv1][Jira:OLM] cluster-olm-operator PolarionID:78393-[OTP][Skipped:Disconnected]support metrics", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] cluster-olm-operator PolarionID:79770-[OTP][Level0]metrics are collected by default", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, { "name": "[sig-olmv1][Jira:OLM] clustercatalog PolarionID:69242-[OTP][Skipped:Disconnected]Catalogd deprecated package bundlemetadata catalogmetadata from clustercatalog CR", "originalName": "[sig-olmv1][Jira:OLM] clustercatalog PolarionID:69242-[Skipped:Disconnected]Catalogd deprecated package bundlemetadata catalogmetadata from clustercatalog CR", @@ -595,171 +565,6 @@ "exclude": "topology==\"External\"" } }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension 
PolarionID:69196-[OTP][Level0][Skipped:Disconnected]Supports Version Ranges during clusterextension upgrade", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:68821-[OTP][Skipped:Disconnected]Supports Version Ranges during Installation", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:74108-[OTP][Skipped:Disconnected][Slow]olm v1 supports legacy upgrade edges", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:74923-[OTP][Skipped:Disconnected]no two ClusterExtensions can manage the same underlying object", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:75501-[OTP][Skipped:Disconnected]the updates of various status fields is orthogonal", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:76685-[OTP][Skipped:Disconnected]olm v1 supports selecting catalogs [Serial]", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:77972-[OTP][Skipped:Disconnected]olm v1 Supports MaxOCPVersion in properties file", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:82249-[OTP][Skipped:Disconnected]Verify olmv1 support for float type maxOCPVersion in properties file", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:80117-[OTP][Skipped:Disconnected] Single Namespace Install Mode should be supported", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] 
clusterextension PolarionID:80120-[OTP][Skipped:Disconnected] Own Namespace Install Mode should be supported", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, - { - "name": "[sig-olmv1][Jira:OLM] clusterextension PolarionID:82136-[OTP][Skipped:Disconnected]olm v1 supports NetworkPolicy resources", - "labels": { - "Extended": {}, - "NonHyperShiftHOST": {} - }, - "resources": { - "isolation": {} - }, - "source": "openshift:payload:olmv1", - "lifecycle": "blocking", - "environmentSelector": { - "exclude": "topology==\"External\"" - } - }, { "name": "[sig-olmv1][Jira:OLM] OLM v1 for stress PolarionID:81509-[OTP][Skipped:Disconnected][OlmStress]olmv1 create mass operator to see if they all are installed successfully [Slow][Timeout:330m]", "labels": { diff --git a/openshift/tests-extension/pkg/bindata/operator/operator.go b/openshift/tests-extension/pkg/bindata/operator/operator.go index f849820b8..d4a997490 100644 --- a/openshift/tests-extension/pkg/bindata/operator/operator.go +++ b/openshift/tests-extension/pkg/bindata/operator/operator.go @@ -2,7 +2,6 @@ // sources: // testdata/operator/Dockerfile // testdata/operator/manifests/registry.clusterserviceversion.yaml -// testdata/operator/manifests/script.configmap.yaml // testdata/operator/metadata/annotations.yaml // testdata/operator/metadata/properties.yaml // testdata/operator/tests/scorecard/config.yaml @@ -97,12 +96,12 @@ func dockerfile() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "Dockerfile", size: 888, mode: os.FileMode(420), modTime: time.Unix(1759972370, 0)} + info := bindataFileInfo{name: "Dockerfile", size: 888, mode: os.FileMode(420), modTime: time.Unix(1760017176, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _manifestsRegistryClusterserviceversionYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x4b\x6f\xdb\xb8\x13\xbf\xfb\x53\x0c\x74\xfe\x4b\xb1\xd3\xa4\x0d\x78\xfa\xa7\x6d\x10\x2c\x90\x17\x92\xec\x1e\xb6\x28\x16\x63\x6a\x62\x73\x4d\x91\x5c\x92\x72\xaa\x66\xf3\xdd\x17\x94\x64\x5b\xb2\x68\xc7\x45\xf7\xb2\xbc\x58\x22\xe7\xf1\x9b\x27\x47\x46\x23\x7e\x23\xeb\x84\x56\x0c\xb4\x21\x8b\x5e\x5b\x97\x71\x6d\x49\x87\x9f\xe2\x68\x39\x41\x69\xe6\x38\x19\x2d\x84\xca\x19\x7c\x92\xa5\xf3\x64\x1f\xc8\x2e\x05\xa7\x96\x75\x54\x90\xc7\x1c\x3d\xb2\x11\x00\x2a\xa5\x3d\x7a\xa1\x95\x0b\xaf\x00\x28\x8b\x94\xbe\x61\x61\x24\x39\x06\x7f\xa7\xf5\x26\xc0\x97\xf6\x17\xe0\x65\xfd\x04\x90\x6c\x00\x25\x0c\x12\xcd\x53\x6d\x72\x5a\xa6\x74\x4c\xd9\x0a\xdf\x93\xc5\x82\x9e\xb5\x5d\x64\x42\x67\x6f\x52\xac\x2d\x48\xfe\xd7\xd5\x13\xcc\x09\x1a\xee\x69\x26\x9c\xb7\x55\xff\x74\x65\x50\xc2\x7a\xe8\x00\x12\x89\x53\x92\x6e\xb0\x5f\x23\x37\xd9\xa2\x9c\x92\x55\xe4\xc9\x05\xcd\x05\x2a\x9c\x51\x9e\x4e\xab\xa0\x69\x51\x3a\xaf\x0b\xf1\x9d\x7a\xaa\x76\xb0\x2a\x2c\x28\x30\xbd\xbc\xc0\xe3\xc5\xc3\x63\xfa\xf1\xd7\x9b\xcf\x57\x17\xf0\xfa\x9a\xf4\x78\x5f\xfb\xa2\x92\x9d\x6c\xa9\xab\x03\xd0\xe5\xee\xf1\x26\xce\x10\x4f\x18\xa8\x52\xca\xf5\xf6\x6b\xfb\xf4\xb5\xfe\xe5\x68\x70\x2a\xa4\xf0\x22\xc4\xf1\x23\x3a\xc1\xe1\x17\xe5\x3c\xb6\x1c\xdc\x12\x7a\xca\xcf\x3d\x83\xe4\x78\x7c\x7c\x9a\x8e\xcf\xd2\xf1\xfb\xc7\xc9\x07\x76\xf2\x8e\x9d\x4c\x7e\x6f\x74\x6f\x92\x2c\x1a\xac\x69\x29\x64\x4e\x76\x93\x8c\xa9\xcb\x17\xe9\x72\x92\xbd\x3b\xc9\x26\x87\x08\x30\x56\xff\x49\xdc\xff\x21\xb1\xd2\xa5\x67\x30\xd3\xb5\x67\x5b\xb9\x75\x3e\x9c\x8c\x00\x82\xa3\x18\x0c\xdc\x94\x2d\xc7\xd9\xb8\x56\x14\x08\x9c\x41\x4e\x0c\x8c\x44\x4e\x73\x1d\xf8\x47\xc1\x4d\x75\x96\x1b\xe1\x9a\x1a\xc8\xe9\x49\x28\xd1\xe4\x3b\xbc\x04\x97\xf1\x3a\xd0\x96\x9c\x2e\x6d\xf7\xbc\x3d\xce\xc9\x71\x2b\x4c\xbb\xb3\x8d\xa0\x7b\x9c\xc1\xe3\xed\xe7\xdb\x2c\xf0\x08\x67\x24\x56\x37\x71\xd4\x23\x00\xc1\xb5\x0a\xb8\x52\x98\xa2\xa3\xf7\x27\x75\x35\x42\xd2\xf8\xbc\xa0\x5c\xa0\xaf\x0c\xb5\x3b\xa2\x89\x5a\x53\x9e\x2b\x8b\xc2\xca\xc9\x48\x5d\x15\xa4\xbc\x5b\x6d\xa5\x50\x67\x3c\xeb\xe4\xca\xbe\x44\x67\xb0\x4e\xf3\xbd\x1c\x3b\xfc\xdf\xe1\xe1\x5a\x79\xab\x65\x6a\x24\x2a\x62\xab\x57\x49\x36\x6d\xb4\xd9\x35\xed\x0e\x59\xe9\x1e\x96\xae\xd1\x61\x59\x32\x52\x70\x74\x0c\x26\x9d\x5d\x47\x92\xb8\xd7\x96\xf5\x8a\xac\x40\xcf\xe7\x57\x75\x1b\x60\x5b\x85\x7c\x38\x66\x00\xe7\x2d\x7a\x9a\x55\x6d\x56\xac\x96\xa7\xc2\x48\xf4\xb4\xa5\xb3\xd3\x60\xbb\x6b\xd0\x6c\xbb\x2b\x78\x9c\x7b\xb9\xe5\xf9\x9c\x9e\xb0\x94\xbe\xf6\x0e\x0a\x15\x6a\x6d\x08\x2e\x2c\x19\x35\xf1\xc7\x8c\x1c\x7a\x3a\xac\xa5\x96\x65\x41\x03\xd1\x69\x1b\xc9\x26\xff\x5d\x4c\xf1\x93\x98\x5d\xa3\x19\x62\x5a\x25\xc1\xdc\x7b\x93\xa7\x8d\x80\x08\x51\x6b\xfb\xb5\xce\x89\xc1\xf8\xc3\xe9\x69\x24\x7e\xb5\x4f\x22\xd8\xd0\xce\xa2\xce\x28\x0a\x0c\x17\xe3\x97\xe4\xa8\xc5\x7d\x54\x83\xc8\xdc\x3c\xf9\x3a\x20\x17\x05\xce\xa8\xd3\x9f\x3f\xdd\xde\x3c\xde\xdf\x5e\x5d\x5d\xdc\x0f\x5a\x7b\x58\x46\x5b\x1f\x51\x1a\xe0\xac\xa1\xde\x69\xeb\x19\x9c\x8d\xcf\x26\x03\xba\xc6\xcf\xd7\xba\x54\xbb\xa4\xec\x77\x78\x58\x45\xe0\xbe\x43\x3f\x67\x70\xb4\x8f\xce\x12\xe6\xb7\x4a\x56\x0c\xbc\x2d\x69\x40\x22\xc5\x92\x14\x39\x77\x67\xf5\x94\x62\x58\x82\xd3\x2e\xc9\xc7\x8e\x00\x4c\xa3\x7f\x4e\x28\xfd\xfc\x7b\x9c\x64\xb7\x1b\x42\xc3\x13\x5e\xa0\xfc\x4c\x12\xab\x07\xe2\x5a\xe5\xa1\xd2\xb7\xa3\x5f\x8b\x21\x2b\x74\xbe\xa6\x39\x1e\x0f\x68\x1a\x97\xc5\x4b\xa6\x71\x83\xf8\x69\x43\x83\x94\xea\xdf\xb2\xf3\x00\x33\x27\x43\x33\x57\xd7\x57\x34\x71\xa4\x28\x44\x3c\xa5\x00\xb8\x29\x19\x9c\x8e\xc7\x45\x3c\x9d\xa8\xd0\xb6\x62\x30\x39\x3e\xbb\x16\x11\x0a\x4b\x7f\x95\xe4\xf6\xc
a\x9e\xbc\x21\xfa\xfd\x49\x44\xb2\x23\x5e\x5a\xe1\xab\x4f\x5a\x79\xfa\x16\xf5\x3e\x4a\xa9\x9f\xef\xac\x58\x0a\x49\x33\xba\x70\x1c\x25\x36\x97\xf4\x13\x4a\x37\x4c\xe9\xad\x89\x28\x8a\x29\xb7\x3a\xda\xad\x42\xed\x9d\x5f\x5d\x0d\x61\x7a\xb4\xbe\x34\x3f\x95\x3c\xb5\x0c\xca\x7f\x3c\x7b\x9e\x50\xc8\xd2\xd2\xe3\xdc\x92\x0b\xe3\x0e\x83\x77\xc3\xbc\x78\x3b\x77\xde\x74\xb5\x2d\xd5\xb9\xbb\xd1\xea\x5e\x6b\x1f\x6d\x17\xed\x64\x75\xce\x79\x68\x3e\xcd\xd0\xd3\xb6\xee\x2d\x4a\x4f\xb6\x10\xaa\x0e\xd3\xa5\x45\x4e\x77\x3b\xb0\x99\x40\xe7\x5c\xf7\x9e\x4c\xc1\x96\xb2\x1b\xb7\x34\x0c\x75\x97\x56\x97\xa6\x17\xcc\x74\x35\x45\xb5\xe0\x63\x75\x91\xb6\x77\x53\x81\xa6\xdb\x1d\x97\x64\xa7\x5b\x74\x33\xf2\xbd\x77\x29\x5c\x7f\xe3\x39\x4c\x17\x7d\xd1\xf5\x5c\xdd\xdb\x2a\x4d\xbe\xbd\x65\x06\x7c\x39\x49\xea\x10\xed\x34\x8f\x6b\x6d\xf3\xd6\x89\xd9\xe2\x2c\x0c\x09\x6f\xdb\x2b\x09\x1d\xfd\xf7\x6c\x3d\x24\x94\xb4\x0c\xf3\xef\x7e\xd3\x22\x38\xfb\xa0\x62\x29\x7c\xf0\x84\xba\x19\x0d\x37\x03\xf9\x66\x6c\x0f\xd3\x8b\x6b\x66\x7d\x57\x9a\x50\xd1\x94\x77\xca\xa8\x99\xf3\x6f\x9f\xd5\xcd\xea\x0b\xe6\x0d\xd2\x07\xa1\x66\x92\x76\x52\x6f\x9a\x5f\x43\x7e\x5d\x4a\x2f\x0e\x95\x7d\x2e\xe5\x9a\x34\xb8\x74\x41\xd5\xb3\xb6\x79\x0b\x3f\x36\xfe\x4b\xa1\x16\xed\xf1\xbe\x6f\x84\xd2\xca\x66\xda\x73\xec\xe8\x68\xf8\x19\x97\xeb\x02\x85\x1a\x85\x61\x5d\x74\x47\xba\x14\xa8\x40\x21\x19\x54\xba\xb4\xff\xaf\x9f\x33\xae\x9b\xfb\xa4\x51\x77\xbd\x66\x80\x80\xbc\x16\xe1\xeb\x7e\xc6\xa0\xfe\x1f\x61\x04\x60\xac\x5e\x8a\xf0\x95\xda\xe1\xbb\x6b\xf7\x56\x5c\x5b\x10\x83\xbe\x0d\xaa\xe5\xea\x2f\x97\xe6\x6b\xf3\x9f\x00\x00\x00\xff\xff\x78\xa4\x24\x61\x85\x11\x00\x00") +var _manifestsRegistryClusterserviceversionYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x57\x4d\x6f\xe3\x36\x13\xbe\xfb\x57\x0c\x74\x7e\xe9\xd8\xd9\xec\xbe\x01\x4f\xcd\xee\x06\x8b\x02\xf9\x42\x92\xf6\xd0\xa2\x28\xc6\xd4\xc4\x66\x4d\x91\x2c\x49\x39\xab\x4d\xf3\xdf\x0b\x52\x92\x2d\x59\x72\x92\x45\x7b\x29\x2f\x92\xe6\xf3\x99\xe1\x70\x38\x42\x2b\x7f\x26\xe7\xa5\xd1\x1c\x8c\x25\x87\xc1\x38\x3f\x15\xc6\x91\x89\x8f\xe2\x68\x33\x47\x65\x57\x38\x9f\xac\xa5\xce\x39\x7c\x52\xa5\x0f\xe4\xee\xc8\x6d\xa4\xa0\x46\x75\x52\x50\xc0\x1c\x03\xf2\x09\x00\x6a\x6d\x02\x06\x69\xb4\x8f\x9f\x00\xa8\x0a\x46\x5f\xb1\xb0\x8a\x3c\x87\xbf\x58\x22\x02\xfc\xda\x3c\x01\x9e\xb6\x6f\x00\xd9\x0e\x50\xc6\x21\x33\x82\x19\x9b\xd3\x86\xd1\x31\x4d\x5b\x7c\x0f\x0e\x0b\x7a\x34\x6e\x3d\x95\x66\xfa\xaa\xc4\x36\x82\xec\x7f\x5d\x3f\x31\x9c\xe8\xe1\x96\x96\xd2\x07\x57\xf5\xb9\x6d\x40\x19\xef\xa1\x03\xc8\x14\x2e\x48\xf9\x01\x3d\x21\xb7\xd3\x75\xb9\x20\xa7\x29\x90\x8f\x9e\x0b\xd4\xb8\xa4\x9c\x2d\xaa\xe8\x69\x5d\xfa\x60\x0a\xf9\x8d\x7a\xae\x0e\xa8\x6a\x2c\x28\x2a\x3d\x3d\xc1\xfd\xf9\xdd\x3d\xfb\xf8\xd3\xd5\xe7\x8b\x73\x78\x7e\xce\x7a\xba\xcf\x7d\x53\xd9\x41\x35\xe6\xd3\x06\x74\xb5\x7b\xba\x99\xb7\x24\x32\x0e\xba\x54\x6a\x4b\x7e\x6e\xde\x7e\x4b\x4f\x81\x16\x17\x52\xc9\x20\xe3\x3e\x7e\x44\x2f\x05\xfc\xa8\x7d\xc0\x46\x43\x38\xc2\x40\xf9\x59\xe0\x90\x1d\xcf\x8e\xdf\xb3\xd9\x29\x9b\x7d\xb8\x9f\xff\x9f\x9f\xbc\xe3\x27\xf3\x5f\x6a\xdf\xbb\x22\x1b\xdd\xac\x45\x29\x55\x4e\x6e\x57\x8c\xcc\xe7\x6b\xb6\x99\x4f\xdf\x9d\x4c\xe7\x6f\x31\x60\x9d\xf9\x83\x44\xf8\x5d\x61\x65\xca\xc0\x61\x69\x52\x66\x1b\xbb\xa9\x1e\x4e\x26\x00\x31\x51\x1c\x06\x69\x9a\x6e\x66\xd3\x59\x72\x14\x05\xbc\x45\x41\x1c\xac\x42\x41\x2b\x13\xf5\x27\x31\x4d\xa9\xca\xad\xf4\xf5\x19\xc8\xe9\x41\x6a\x59\xd7\x3b\x3c\xc5\x94\x89\xb4\xd1\x8e\xbc\x29\x5d\x97\xdf\xb0\x73\xf2\xc2\x49\xdb\x50\xf6\x11\x74\xd9\x53\xb8\xbf\xfe\x7c\x3d\x8d\x3a\xd2\x5b\x85\xd5\xd5\x38\xea\x09\x80\x14\x46\x47\x5c\x0c\x16
\xe8\xe9\xc3\x49\x3a\x8d\x90\xd5\x39\x2f\x28\x97\x18\x2a\x4b\x0d\x45\xd6\xbb\x56\x1f\xcf\x36\xa2\xb8\x72\xb2\xca\x54\x05\xe9\xe0\x5b\x12\x83\x54\xf1\xbc\x53\x2b\x2f\x15\x3a\x87\x6d\x99\xbf\xa8\x71\x20\xff\x1d\x1d\x61\x74\x70\x46\x31\xab\x50\x13\x6f\x3f\x15\x39\x56\x7b\x73\x5b\xd9\x03\xb6\xd8\x0b\x2a\xdd\xa0\xe3\x72\x64\x95\x14\xe8\x39\xcc\x3b\x54\x4f\x8a\x44\x30\x8e\xf7\x0e\x59\x81\x41\xac\x2e\x52\x1b\xe0\x7b\x07\xf9\xed\x98\x01\x7c\x70\x18\x68\x59\x35\x55\xd1\xae\x40\x85\x55\x18\x68\xcf\x67\xa7\xc1\x76\xd7\xa0\xd9\x76\x57\xcc\xb8\x08\x6a\x2f\xf3\x39\x3d\x60\xa9\x42\xca\x0e\x4a\x1d\xcf\xda\x10\x5c\x5c\x6a\x34\xc4\xef\x0b\x72\x98\xe9\xd6\x42\x72\x3d\xb0\xce\x00\xdd\x72\xd4\x67\x51\xa0\xce\x87\x0c\x06\x5e\x11\xd9\x11\x7a\x36\x9f\xcd\x66\xd9\x80\x21\x0b\x5c\x52\xa7\x43\x7e\xba\xbe\xba\xbf\xbd\xbe\xb8\x38\xbf\x1d\x34\xd7\x94\x04\xb9\x21\x4d\xde\xdf\x38\xb3\xa0\xa1\x7b\x80\x55\x08\xf6\x0b\x85\x31\x16\x80\xc5\xb0\xe2\x70\xb4\x22\x54\x61\xf5\x6d\x5c\xc4\xb8\xc0\xe1\x74\x76\x3a\x1f\x61\xa7\xc6\x81\xea\x33\x29\xac\xee\x48\x18\x9d\xc7\x0a\x7d\x3f\x22\x69\xc9\x49\x93\x6f\x65\x8e\x67\x03\x99\xfa\x94\x8c\x6f\x75\x3c\x00\x98\xcb\x7f\x1c\x68\xb4\x52\xfd\x5b\x71\xbe\x21\xcc\xf9\x30\xcc\xb6\xed\x8e\x14\x51\xdc\xcd\x42\x86\x51\x0e\x80\xb0\x25\x87\xf7\xb3\x59\x31\xca\x2d\xa8\x30\xae\xe2\x30\x3f\x3e\xbd\x94\x23\x12\x8e\xfe\x2c\xc9\xbf\x68\x7b\xfe\x8a\xe9\x0f\x27\x23\x96\x3d\x89\xd2\xc9\x50\x7d\x32\x3a\xd0\xd7\xd1\xec\xa3\x52\xe6\xf1\xc6\xc9\x8d\x54\xb4\xa4\x73\x2f\x50\x61\x7d\xb9\x3c\xa0\xf2\x34\xa2\xd1\xbb\xc9\x47\x31\xe5\xce\xd8\x71\x0e\x83\xb3\x8b\x8b\xc9\x77\x82\x74\xa5\x3e\xf3\x57\x46\xdf\x1a\x13\x38\x04\x57\xee\xa3\x6a\xee\xd2\x33\x21\x4c\xa9\x43\x7d\xcd\x35\x8d\x6a\x4f\x32\x90\x2b\xa4\x4e\x01\x7e\x71\x28\xe8\xe6\x40\x45\xd8\x28\xe7\x7d\xb7\x33\x32\x70\xa5\xea\x46\xcc\xe2\x35\xfe\xc5\x99\xd2\xf6\xd2\xc0\xda\x7b\xb3\x01\x3f\x56\x51\x2c\x36\xb1\x07\xb9\x2c\xd0\xfa\x0e\x79\x43\x6e\xb1\x27\xb7\xa4\xd0\xfb\x56\xd2\xf7\x09\x8f\xf1\x3e\xe9\x9b\x4e\x93\x54\x8f\x54\xda\x7c\x9f\x64\x07\x7a\x39\x29\xea\x08\x1d\x0c\x4f\x18\xe3\xf2\x26\x89\xd3\xf5\x69\xbc\x16\x5e\x8f\x57\x11\x7a\xfa\xef\xc5\xfa\x96\xad\xa4\x4d\x9c\x78\x5e\x0e\x6d\x04\x67\x1f\xd4\x58\x09\xbf\x79\x26\xd9\x0d\x03\xbb\x11\x6c\x37\xa8\x5d\x9a\xbc\x86\xcc\xc0\x97\x36\x76\x52\xca\x3b\xc7\xa8\x9e\xec\xae\x1f\xf5\x55\x3b\xb3\xbe\x22\x7a\x27\xf5\x52\xd1\x41\xe9\x5d\xdb\xa8\xc5\x2f\x4b\x15\xe4\x5b\x6d\x9f\x29\xb5\x15\x8d\x29\x5d\x53\xf5\x68\x5c\xde\xc0\x1f\x1b\xf8\x94\xd4\xeb\x86\xfd\xd2\x54\x58\x3a\xc5\xd3\x25\xe4\xf9\xd1\xd1\x70\x70\xcf\x4d\x81\x52\x4f\xe2\x78\x26\xbb\xd3\x05\x03\x2a\x50\x2a\x0e\x95\x29\xdd\x0f\xe9\x3d\xfe\xd7\x26\x93\xb5\xbb\xcb\xad\x02\x44\xe4\xc9\x44\x48\xfd\x8c\x43\xfa\x73\x9c\x00\x58\x67\x36\x32\xfe\x97\x74\xf4\x6e\x1a\x5a\xab\xb5\x07\x31\xfa\xdb\xa1\xda\xb4\x3f\xd9\xf5\xff\xc5\xdf\x01\x00\x00\xff\xff\x82\x2e\xe1\x7d\x77\x0f\x00\x00") func manifestsRegistryClusterserviceversionYamlBytes() ([]byte, error) { return bindataRead( @@ -117,27 +116,7 @@ func manifestsRegistryClusterserviceversionYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "manifests/registry.clusterserviceversion.yaml", size: 4485, mode: os.FileMode(420), modTime: time.Unix(1764220580, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _manifestsScriptConfigmapYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xce\xbf\x6e\x83\x30\x10\xc7\xf1\xdd\x4f\xf1\x2b\x9d\x8d\xa1\x13\xf2\xd0\xa5\x73\xd7\xee\x57\x7c\xc4\x56\x82\xb1\xec\x0b\x24\x52\x1e\x3e\x12\xe4\xcf\xc4\x78\xf7\xf9\x4a\x77\x94\xc2\x1f\xe7\x12\xa6\x68\x31\xb7\xea\x18\xa2\xb3\xf8\x99\xe2\x10\x0e\xbf\x94\xd4\xc8\x42\x8e\x84\xac\x02\x22\x8d\x6c\xe1\x45\x92\xd3\xa5\xcf\x21\x89\x7a\xd2\xba\xac\x8b\xb7\xb8\x29\x00\xf8\xfc\x30\xff\x21\x9a\xe2\xd7\x89\x7b\x3f\xa1\x7a\x9c\x41\x5b\x7f\xd5\x4d\xf5\x06\xc9\x67\xc6\x37\xcc\x4c\xd9\x2c\xcb\x62\x8a\x50\x16\x76\xfb\x41\x66\x72\xd7\x7d\x3e\x85\x99\x37\xbd\x70\xbf\x7d\x06\x3d\x40\xfb\x57\x02\x9d\xd0\x35\x5d\xab\xee\x01\x00\x00\xff\xff\xa2\x23\x09\xd5\xfd\x00\x00\x00") - -func manifestsScriptConfigmapYamlBytes() ([]byte, error) { - return bindataRead( - _manifestsScriptConfigmapYaml, - "manifests/script.configmap.yaml", - ) -} - -func manifestsScriptConfigmapYaml() (*asset, error) { - bytes, err := manifestsScriptConfigmapYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "manifests/script.configmap.yaml", size: 253, mode: os.FileMode(420), modTime: time.Unix(1764220580, 0)} + info := bindataFileInfo{name: "manifests/registry.clusterserviceversion.yaml", size: 3959, mode: os.FileMode(420), modTime: time.Unix(1760017176, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -157,7 +136,7 @@ func metadataAnnotationsYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "metadata/annotations.yaml", size: 732, mode: os.FileMode(420), modTime: time.Unix(1759972370, 0)} + info := bindataFileInfo{name: "metadata/annotations.yaml", size: 732, mode: os.FileMode(420), modTime: time.Unix(1760017176, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -177,7 +156,7 @@ func metadataPropertiesYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "metadata/properties.yaml", size: 73, mode: os.FileMode(420), modTime: time.Unix(1759972370, 0)} + info := bindataFileInfo{name: "metadata/properties.yaml", size: 73, mode: os.FileMode(420), modTime: time.Unix(1760017176, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -197,7 +176,7 @@ func testsScorecardConfigYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "tests/scorecard/config.yaml", size: 1614, mode: os.FileMode(420), modTime: time.Unix(1759972370, 0)} + info := bindataFileInfo{name: "tests/scorecard/config.yaml", size: 1614, mode: os.FileMode(420), modTime: time.Unix(1760017176, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -256,7 +235,6 @@ func AssetNames() []string { var _bindata = map[string]func() (*asset, error){ "Dockerfile": dockerfile, "manifests/registry.clusterserviceversion.yaml": manifestsRegistryClusterserviceversionYaml, - "manifests/script.configmap.yaml": manifestsScriptConfigmapYaml, "metadata/annotations.yaml": metadataAnnotationsYaml, "metadata/properties.yaml": metadataPropertiesYaml, "tests/scorecard/config.yaml": testsScorecardConfigYaml, @@ -308,7 +286,6 @@ var _bintree = &bintree{nil, map[string]*bintree{ "Dockerfile": &bintree{dockerfile, map[string]*bintree{}}, "manifests": &bintree{nil, map[string]*bintree{ "registry.clusterserviceversion.yaml": &bintree{manifestsRegistryClusterserviceversionYaml, map[string]*bintree{}}, - "script.configmap.yaml": &bintree{manifestsScriptConfigmapYaml, map[string]*bintree{}}, }}, "metadata": &bintree{nil, map[string]*bintree{ "annotations.yaml": &bintree{metadataAnnotationsYaml, map[string]*bintree{}}, diff --git 
a/openshift/tests-extension/pkg/bindata/qe/bindata.go b/openshift/tests-extension/pkg/bindata/qe/bindata.go index bccc3a73e..d975ed082 100644 --- a/openshift/tests-extension/pkg/bindata/qe/bindata.go +++ b/openshift/tests-extension/pkg/bindata/qe/bindata.go @@ -9,13 +9,12 @@ // test/qe/testdata/olm/clustercatalog-secret.yaml // test/qe/testdata/olm/clustercatalog-withlabel.yaml // test/qe/testdata/olm/clustercatalog.yaml +// test/qe/testdata/olm/clusterextension-withoutChannel-OwnSingle.yaml // test/qe/testdata/olm/clusterextension-withselectorExpressions-WithoutChannelVersion.yaml // test/qe/testdata/olm/clusterextension-withselectorLableExpressions-WithoutChannelVersion.yaml // test/qe/testdata/olm/clusterextension-withselectorlabel-OwnSingle.yaml // test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutChannel.yaml // test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutChannelVersion.yaml -// test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutVersion.yaml -// test/qe/testdata/olm/clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml // test/qe/testdata/olm/clusterextension-withselectorlabel.yaml // test/qe/testdata/olm/clusterextension.yaml // test/qe/testdata/olm/clusterextensionWithoutChannel.yaml @@ -493,6 +492,56 @@ func testQeTestdataOlmClustercatalogYaml() (*asset, error) { return a, nil } +var _testQeTestdataOlmClusterextensionWithoutchannelOwnsingleYaml = []byte(`apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: operator-template +objects: +- apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: "${NAME}" + annotations: + olm.operatorframework.io/watch-namespace: "${WATCHNS}" + spec: + namespace: "${INSTALLNAMESPACE}" + serviceAccount: + name: "${SANAME}" + source: + sourceType: "${SOURCETYPE}" + catalog: + packageName: "${PACKAGE}" + version: "${VERSION}" + upgradeConstraintPolicy: "${POLICY}" +parameters: +- name: NAME +- name: INSTALLNAMESPACE +- name: WATCHNS +- name: PACKAGE +- name: VERSION +- name: SANAME +- name: POLICY + value: "CatalogProvided" +- name: SOURCETYPE + value: "Catalog" + +`) + +func testQeTestdataOlmClusterextensionWithoutchannelOwnsingleYamlBytes() ([]byte, error) { + return _testQeTestdataOlmClusterextensionWithoutchannelOwnsingleYaml, nil +} + +func testQeTestdataOlmClusterextensionWithoutchannelOwnsingleYaml() (*asset, error) { + bytes, err := testQeTestdataOlmClusterextensionWithoutchannelOwnsingleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "test/qe/testdata/olm/clusterextension-withoutChannel-OwnSingle.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _testQeTestdataOlmClusterextensionWithselectorexpressionsWithoutchannelversionYaml = []byte(`apiVersion: template.openshift.io/v1 kind: Template metadata: @@ -780,122 +829,6 @@ func testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelversionYaml return a, nil } -var _testQeTestdataOlmClusterextensionWithselectorlabelWithoutversionYaml = []byte(`apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: operator-template -objects: -- apiVersion: olm.operatorframework.io/v1 - kind: ClusterExtension - metadata: - name: "${NAME}" - spec: - namespace: "${INSTALLNAMESPACE}" - serviceAccount: - name: "${SANAME}" - source: - sourceType: "${SOURCETYPE}" - catalog: - packageName: "${PACKAGE}" - channels: - - "${CHANNEL}" - selector: - matchLabels: - "${LABELKEY}": 
"${LABELVALUE}" - upgradeConstraintPolicy: "${POLICY}" -parameters: -- name: NAME -- name: INSTALLNAMESPACE -- name: PACKAGE -- name: CHANNEL -- name: SANAME -- name: POLICY - value: "CatalogProvided" -- name: LABELVALUE - # suggest to use case id -- name: LABELKEY - value: "olmv1-test" -- name: SOURCETYPE - value: "Catalog" -`) - -func testQeTestdataOlmClusterextensionWithselectorlabelWithoutversionYamlBytes() ([]byte, error) { - return _testQeTestdataOlmClusterextensionWithselectorlabelWithoutversionYaml, nil -} - -func testQeTestdataOlmClusterextensionWithselectorlabelWithoutversionYaml() (*asset, error) { - bytes, err := testQeTestdataOlmClusterextensionWithselectorlabelWithoutversionYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutVersion.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelOwnsingleYaml = []byte(`apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: operator-template -objects: -- apiVersion: olm.operatorframework.io/v1 - kind: ClusterExtension - metadata: - name: "${NAME}" - spec: - namespace: "${INSTALLNAMESPACE}" - serviceAccount: - name: "${SANAME}" - config: - configType: Inline - inline: - watchNamespace: "${WATCHNS}" - source: - sourceType: "${SOURCETYPE}" - catalog: - packageName: "${PACKAGE}" - version: "${VERSION}" - selector: - matchLabels: - "${LABELKEY}": "${LABELVALUE}" - upgradeConstraintPolicy: "${POLICY}" -parameters: -- name: NAME -- name: INSTALLNAMESPACE -- name: WATCHNS -- name: PACKAGE -- name: VERSION -- name: SANAME -- name: POLICY - value: "CatalogProvided" -- name: SOURCETYPE - value: "Catalog" -- name: LABELVALUE - # suggest to use case id -- name: LABELKEY - value: "olmv1-test" - - - - -`) - -func testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelOwnsingleYamlBytes() ([]byte, error) { - return _testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelOwnsingleYaml, nil -} - -func testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelOwnsingleYaml() (*asset, error) { - bytes, err := testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelOwnsingleYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "test/qe/testdata/olm/clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - var _testQeTestdataOlmClusterextensionWithselectorlabelYaml = []byte(`apiVersion: template.openshift.io/v1 kind: Template metadata: @@ -2317,13 +2250,12 @@ var _bindata = map[string]func() (*asset, error){ "test/qe/testdata/olm/clustercatalog-secret.yaml": testQeTestdataOlmClustercatalogSecretYaml, "test/qe/testdata/olm/clustercatalog-withlabel.yaml": testQeTestdataOlmClustercatalogWithlabelYaml, "test/qe/testdata/olm/clustercatalog.yaml": testQeTestdataOlmClustercatalogYaml, + "test/qe/testdata/olm/clusterextension-withoutChannel-OwnSingle.yaml": testQeTestdataOlmClusterextensionWithoutchannelOwnsingleYaml, "test/qe/testdata/olm/clusterextension-withselectorExpressions-WithoutChannelVersion.yaml": testQeTestdataOlmClusterextensionWithselectorexpressionsWithoutchannelversionYaml, "test/qe/testdata/olm/clusterextension-withselectorLableExpressions-WithoutChannelVersion.yaml": 
testQeTestdataOlmClusterextensionWithselectorlableexpressionsWithoutchannelversionYaml, "test/qe/testdata/olm/clusterextension-withselectorlabel-OwnSingle.yaml": testQeTestdataOlmClusterextensionWithselectorlabelOwnsingleYaml, "test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutChannel.yaml": testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelYaml, "test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutChannelVersion.yaml": testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelversionYaml, - "test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutVersion.yaml": testQeTestdataOlmClusterextensionWithselectorlabelWithoutversionYaml, - "test/qe/testdata/olm/clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml": testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelOwnsingleYaml, "test/qe/testdata/olm/clusterextension-withselectorlabel.yaml": testQeTestdataOlmClusterextensionWithselectorlabelYaml, "test/qe/testdata/olm/clusterextension.yaml": testQeTestdataOlmClusterextensionYaml, "test/qe/testdata/olm/clusterextensionWithoutChannel.yaml": testQeTestdataOlmClusterextensionwithoutchannelYaml, @@ -2388,22 +2320,21 @@ var _bintree = &bintree{nil, map[string]*bintree{ "qe": {nil, map[string]*bintree{ "testdata": {nil, map[string]*bintree{ "olm": {nil, map[string]*bintree{ - "basic-bd-plain-image.yaml": {testQeTestdataOlmBasicBdPlainImageYaml, map[string]*bintree{}}, - "basic-bd-registry-image.yaml": {testQeTestdataOlmBasicBdRegistryImageYaml, map[string]*bintree{}}, - "binding-prefligth.yaml": {testQeTestdataOlmBindingPrefligthYaml, map[string]*bintree{}}, - "binding-prefligth_multirole.yaml": {testQeTestdataOlmBindingPrefligth_multiroleYaml, map[string]*bintree{}}, - "cip.yaml": {testQeTestdataOlmCipYaml, map[string]*bintree{}}, - "clustercatalog-secret-withlabel.yaml": {testQeTestdataOlmClustercatalogSecretWithlabelYaml, map[string]*bintree{}}, - "clustercatalog-secret.yaml": {testQeTestdataOlmClustercatalogSecretYaml, map[string]*bintree{}}, - "clustercatalog-withlabel.yaml": {testQeTestdataOlmClustercatalogWithlabelYaml, map[string]*bintree{}}, - "clustercatalog.yaml": {testQeTestdataOlmClustercatalogYaml, map[string]*bintree{}}, + "basic-bd-plain-image.yaml": {testQeTestdataOlmBasicBdPlainImageYaml, map[string]*bintree{}}, + "basic-bd-registry-image.yaml": {testQeTestdataOlmBasicBdRegistryImageYaml, map[string]*bintree{}}, + "binding-prefligth.yaml": {testQeTestdataOlmBindingPrefligthYaml, map[string]*bintree{}}, + "binding-prefligth_multirole.yaml": {testQeTestdataOlmBindingPrefligth_multiroleYaml, map[string]*bintree{}}, + "cip.yaml": {testQeTestdataOlmCipYaml, map[string]*bintree{}}, + "clustercatalog-secret-withlabel.yaml": {testQeTestdataOlmClustercatalogSecretWithlabelYaml, map[string]*bintree{}}, + "clustercatalog-secret.yaml": {testQeTestdataOlmClustercatalogSecretYaml, map[string]*bintree{}}, + "clustercatalog-withlabel.yaml": {testQeTestdataOlmClustercatalogWithlabelYaml, map[string]*bintree{}}, + "clustercatalog.yaml": {testQeTestdataOlmClustercatalogYaml, map[string]*bintree{}}, + "clusterextension-withoutChannel-OwnSingle.yaml": {testQeTestdataOlmClusterextensionWithoutchannelOwnsingleYaml, map[string]*bintree{}}, "clusterextension-withselectorExpressions-WithoutChannelVersion.yaml": {testQeTestdataOlmClusterextensionWithselectorexpressionsWithoutchannelversionYaml, map[string]*bintree{}}, "clusterextension-withselectorLableExpressions-WithoutChannelVersion.yaml": 
{testQeTestdataOlmClusterextensionWithselectorlableexpressionsWithoutchannelversionYaml, map[string]*bintree{}}, "clusterextension-withselectorlabel-OwnSingle.yaml": {testQeTestdataOlmClusterextensionWithselectorlabelOwnsingleYaml, map[string]*bintree{}}, "clusterextension-withselectorlabel-WithoutChannel.yaml": {testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelYaml, map[string]*bintree{}}, "clusterextension-withselectorlabel-WithoutChannelVersion.yaml": {testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelversionYaml, map[string]*bintree{}}, - "clusterextension-withselectorlabel-WithoutVersion.yaml": {testQeTestdataOlmClusterextensionWithselectorlabelWithoutversionYaml, map[string]*bintree{}}, - "clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml": {testQeTestdataOlmClusterextensionWithselectorlabelWithoutchannelOwnsingleYaml, map[string]*bintree{}}, "clusterextension-withselectorlabel.yaml": {testQeTestdataOlmClusterextensionWithselectorlabelYaml, map[string]*bintree{}}, "clusterextension.yaml": {testQeTestdataOlmClusterextensionYaml, map[string]*bintree{}}, "clusterextensionWithoutChannel.yaml": {testQeTestdataOlmClusterextensionwithoutchannelYaml, map[string]*bintree{}}, diff --git a/openshift/tests-extension/test/qe/specs/olmv1.go b/openshift/tests-extension/test/qe/specs/olmv1.go index 2239c5fe3..a53f1cb97 100644 --- a/openshift/tests-extension/test/qe/specs/olmv1.go +++ b/openshift/tests-extension/test/qe/specs/olmv1.go @@ -1,15 +1,10 @@ package specs import ( - "context" - "fmt" "strings" - "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" - "github.com/tidwall/gjson" - "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" exutil "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/test/qe/util" @@ -45,111 +40,4 @@ var _ = g.Describe("[sig-olmv1][Jira:OLM] cluster-olm-operator", g.Label("NonHyp } }) - g.It("PolarionID:78393-[OTP][Skipped:Disconnected]support metrics", func() { - exutil.SkipOnProxyCluster(oc) - - var metricsMsg string - g.By("get catalogd metrics") - promeEp, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("service", "-n", "openshift-catalogd", "catalogd-service", "-o=jsonpath={.spec.clusterIP}").Output() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(promeEp).NotTo(o.BeEmpty()) - if strings.Count(promeEp, ":") >= 2 { - g.Skip("Skip for IPv6.") - } - queryContent := "https://" + promeEp + ":7443/metrics" - - g.By("Get token") - metricsToken, err := exutil.GetSAToken(oc) - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(metricsToken).NotTo(o.BeEmpty()) - - wrongToken, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "openshift-state-metrics", "-n", "openshift-monitoring").Output() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(wrongToken).NotTo(o.BeEmpty()) - - g.By("Get metrics") - podnameStr, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "prometheus==k8s", "-o=jsonpath='{..metadata.name}'").Output() - o.Expect(podnameStr).NotTo(o.BeEmpty()) - prometheusPodname := strings.Split(strings.Trim(podnameStr, "'"), " ")[0] - - errWait := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - metricsMsg, err := oc.AsAdmin().NotShowInfo().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", prometheusPodname, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", metricsToken), 
queryContent).Output() - e2e.Logf("err:%v", err) - if strings.Contains(metricsMsg, "catalogd_http_request_duration_seconds_bucket{code=\"200\"") { - e2e.Logf("found catalogd_http_request_duration_seconds_bucket{code=\"200\"") - return true, nil - } - return false, nil - }) - if errWait != nil { - e2e.Logf("metricsMsg:%v", metricsMsg) - exutil.AssertWaitPollNoErr(errWait, "catalogd_http_request_duration_seconds_bucket{code=\"200\" not found.") - } - - g.By("ClusterRole/openshift-state-metrics has no rule to get the catalogd metrics") - metricsMsg, _ = oc.AsAdmin().NotShowInfo().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", prometheusPodname, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", wrongToken), queryContent).Output() - o.Expect(metricsMsg).To(o.ContainSubstring("Authorization denied")) - - g.By("get operator-controller metrics") - promeEp, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("service", "-n", "openshift-operator-controller", "operator-controller-service", "-o=jsonpath={.spec.clusterIP}").Output() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(promeEp).NotTo(o.BeEmpty()) - queryContent = "https://" + promeEp + ":8443/metrics" - - errWait = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - metricsMsg, err := oc.AsAdmin().NotShowInfo().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", prometheusPodname, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", metricsToken), queryContent).Output() - e2e.Logf("err:%v", err) - if strings.Contains(metricsMsg, "controller_runtime_active_workers") { - e2e.Logf("found controller_runtime_active_workers") - return true, nil - } - return false, nil - }) - if errWait != nil { - e2e.Logf("metricsMsg:%v", metricsMsg) - exutil.AssertWaitPollNoErr(errWait, "controller_runtime_active_workers not found.") - } - - g.By("ClusterRole/openshift-state-metrics has no rule to get the operator-controller metrics") - metricsMsg, _ = oc.AsAdmin().NotShowInfo().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", prometheusPodname, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", wrongToken), queryContent).Output() - o.Expect(metricsMsg).To(o.ContainSubstring("Authorization denied")) - - }) - - g.It("PolarionID:79770-[OTP][Level0]metrics are collected by default", func() { - podnameStr, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "prometheus==k8s", "-o=jsonpath='{..metadata.name}'").Output() - o.Expect(podnameStr).NotTo(o.BeEmpty()) - k8sPodname := strings.Split(strings.Trim(podnameStr, "'"), " ")[0] - - g.By("1) check status of Metrics targets is up") - targetsUrl := "http://localhost:9090/api/v1/targets" - targetsContent, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", k8sPodname, "--", "curl", "-s", targetsUrl).Output() - status := gjson.Get(targetsContent, `data.activeTargets.#(labels.namespace=="openshift-catalogd").health`).String() - if strings.Compare(status, "up") != 0 { - statusAll := gjson.Get(targetsContent, `data.activeTargets.#(labels.namespace=="openshift-catalogd")`).String() - e2e.Logf("catalogd target status: %s", statusAll) - o.Expect(status).To(o.Equal("up")) - } - status = gjson.Get(targetsContent, `data.activeTargets.#(labels.namespace=="openshift-operator-controller").health`).String() - if strings.Compare(status, "up") != 0 { - statusAll := 
gjson.Get(targetsContent, `data.activeTargets.#(labels.namespace=="openshift-operator-controller")`).String() - e2e.Logf("operator-controller target status: %s", statusAll) - o.Expect(status).To(o.Equal("up")) - } - - g.By("2) check metrics are collected") - queryUrl := "http://localhost:9090/api/v1/query" - query1 := `query=catalogd_http_request_duration_seconds_count{code="200"}` - queryResult1, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", k8sPodname, "--", "curl", "-G", "--data-urlencode", query1, queryUrl).Output() - e2e.Logf("query result 1: %s", queryResult1) - o.Expect(queryResult1).To(o.ContainSubstring("value")) - - query2 := `query=controller_runtime_reconcile_total{controller="controller-operator-cluster-extension-controller",result="success"}` - queryResult2, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", k8sPodname, "--", "curl", "-G", "--data-urlencode", query2, queryUrl).Output() - e2e.Logf("query result 2: %s", queryResult2) - o.Expect(queryResult2).To(o.ContainSubstring("value")) - - g.By("3) test SUCCESS") - }) - }) diff --git a/openshift/tests-extension/test/qe/specs/olmv1_ce.go b/openshift/tests-extension/test/qe/specs/olmv1_ce.go index 6491c6576..9949f3cfd 100644 --- a/openshift/tests-extension/test/qe/specs/olmv1_ce.go +++ b/openshift/tests-extension/test/qe/specs/olmv1_ce.go @@ -1,7 +1,6 @@ package specs import ( - "context" "fmt" "os" "os/exec" @@ -11,7 +10,6 @@ import ( g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" exutil "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/test/qe/util" @@ -1495,1434 +1493,4 @@ var _ = g.Describe("[sig-olmv1][Jira:OLM] clusterextension", g.Label("NonHyperSh ceArgocd.WaitProgressingMessage(oc, "Desired state reached") }) - g.It("PolarionID:69196-[OTP][Level0][Skipped:Disconnected]Supports Version Ranges during clusterextension upgrade", func() { - var ( - caseID = "69196" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-69196" - sa = "sa69196" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - Name: "clustercatalog-69196", - LabelValue: labelValue, - Imageref: "quay.io/olmqe/olmtest-operator-index:nginxolm69196", - Template: clustercatalogTemplate, - } - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-69196", - InstallNamespace: ns, - PackageName: "nginx69196", - Channel: "candidate-v1.0", - Version: "1.0.1", - SaName: sa, - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("Create namespace") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - g.By("Create SA for clusterextension") - defer saCrb.Delete(oc) - saCrb.Create(oc) - - g.By("Create clustercatalog") - defer clustercatalog.Delete(oc) 
- clustercatalog.Create(oc) - - g.By("Create clusterextension with channel candidate-v1.0, version 1.0.1") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v1.0.1")) - - g.By("update version to be 1.0.3") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version": "1.0.3"}}}}`) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - conditions, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", "jsonpath={.status.conditions}") - if !strings.Contains(conditions, "error upgrading") { - e2e.Logf("error message is not raised") - return false, nil - } - return true, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - exutil.AssertWaitPollNoErr(errWait, "error message is not raised") - } - - g.By("update version to be >=1.0.1") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version": ">=1.0.1"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - resolvedBundle, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", "jsonpath={.status.install.bundle.name}") - if !strings.Contains(resolvedBundle, "v1.0.2") { - e2e.Logf("clusterextension.resolvedBundle is %s, not v1.0.2, and try next", resolvedBundle) - return false, nil - } - return true, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - exutil.AssertWaitPollNoErr(errWait, "clusterextension resolvedBundle is not v1.0.2") - } - conditions, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", "jsonpath={.status.conditions}") - o.Expect(strings.ToLower(conditions)).To(o.ContainSubstring("desired state reached")) - o.Expect(conditions).NotTo(o.ContainSubstring("error")) - - g.By("update channel to be candidate-v1.1") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v1.1"]}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - resolvedBundle, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", "jsonpath={.status.install.bundle.name}") - if !strings.Contains(resolvedBundle, "v1.1.0") { - e2e.Logf("clusterextension.resolvedBundle is %s, not v1.1.0, and try next", resolvedBundle) - return false, nil - } - return true, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - exutil.AssertWaitPollNoErr(errWait, "clusterextension resolvedBundle is not v1.1.0") - } - }) - - g.It("PolarionID:68821-[OTP][Skipped:Disconnected]Supports Version Ranges during Installation", func() { - var ( - caseID = "68821" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel.yaml") - clusterextensionWithoutChannelTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutChannel.yaml") - clusterextensionWithoutChannelVersionTemplate = filepath.Join(baseDir, 
"clusterextension-withselectorlabel-WithoutChannelVersion.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-68821" - sa = "sa68821" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - Name: "clustercatalog-68821", - LabelValue: labelValue, - Imageref: "quay.io/olmqe/olmtest-operator-index:nginxolm68821", - Template: clustercatalogTemplate, - } - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-68821", - PackageName: "nginx68821", - Channel: "candidate-v0.0", - Version: ">=0.0.1", - LabelValue: labelValue, - InstallNamespace: ns, - SaName: sa, - Template: clusterextensionTemplate, - } - ) - - g.By("Create namespace") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - g.By("Create SA for clusterextension") - defer saCrb.Delete(oc) - saCrb.Create(oc) - - g.By("Create clustercatalog") - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("Create clusterextension with channel candidate-v0.0, version >=0.0.1") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v0.0.3")) - clusterextension.Delete(oc) - - g.By("Create clusterextension with channel candidate-v1.0, version 1.0.x") - clusterextension.Channel = "candidate-v1.0" - clusterextension.Version = "1.0.x" - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v1.0.2")) - clusterextension.Delete(oc) - - g.By("Create clusterextension with channel empty, version >=0.0.1 !=1.1.0 <1.1.2") - clusterextension.Channel = "" - clusterextension.Version = ">=0.0.1 !=1.1.0 <1.1.2" - clusterextension.Template = clusterextensionWithoutChannelTemplate - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v1.0.2")) - clusterextension.Delete(oc) - - g.By("Create clusterextension with channel empty, version empty") - clusterextension.Channel = "" - clusterextension.Version = "" - clusterextension.Template = clusterextensionWithoutChannelVersionTemplate - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v1.1.0")) - clusterextension.Delete(oc) - - g.By("Create clusterextension with invalid version") - clusterextension.Version = "!1.0.1" - clusterextension.Template = clusterextensionTemplate - err = clusterextension.CreateWithoutCheck(oc) - o.Expect(err).To(o.HaveOccurred()) - - }) - - g.It("PolarionID:74108-[OTP][Skipped:Disconnected][Slow]olm v1 supports legacy upgrade edges", func() { - var ( - caseID = "74108" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutVersion.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-74108" - sa = "sa74108" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - 
Name: "clustercatalog-74108", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm74108", - LabelValue: labelValue, - Template: clustercatalogTemplate, - } - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-74108", - InstallNamespace: ns, - PackageName: "nginx74108", - Channel: "candidate-v0.0", - LabelValue: labelValue, - SaName: sa, - Template: clusterextensionTemplate, - } - ) - - g.By("Create namespace") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - g.By("Create SA for clusterextension") - defer saCrb.Delete(oc) - saCrb.Create(oc) - - g.By("1) Create clustercatalog") - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("2) Install clusterextension with channel candidate-v0.0") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("0.0.2")) - - g.By("3) Attempt to update to channel candidate-v2.1 with CatalogProvided policy, that should fail") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v2.1"]}}}}`) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")]}`) - if strings.Contains(message, "error upgrading") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - } - exutil.AssertWaitPollNoErr(errWait, "no error message raised") - - g.By("4) Attempt to update to channel candidate-v0.1 with CatalogProvided policy, that should success") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v0.1"]}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "0.1.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx74108 0.1.0 is not installed") - - g.By("5) Attempt to update to channel candidate-v1.0 with CatalogProvided policy, that should fail") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v1.0"]}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")]}`) - if strings.Contains(message, "error upgrading") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "no error message raised") - - g.By("6) update policy to SelfCertified, upgrade should success") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"upgradeConstraintPolicy": "SelfCertified"}}}}`) - errWait = 
wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.0.2") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx74108 1.0.2 is not installed") - - g.By("7) Attempt to update to channel candidate-v1.1 with CatalogProvided policy, that should success") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"upgradeConstraintPolicy": "CatalogProvided"}}}}`) - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v1.1"]}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.1.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx74108 0.1.0 is not installed") - - g.By("8) Attempt to update to channel candidate-v1.2 with CatalogProvided policy, that should fail") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v1.2"]}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")]}`) - if strings.Contains(message, "error upgrading") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "no error message raised") - - g.By("9) update policy to SelfCertified, upgrade should success") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"upgradeConstraintPolicy": "SelfCertified"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.2.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx74108 1.2.0 is not installed") - - g.By("10) Attempt to update to channel candidate-v2.0 with CatalogProvided policy, that should fail") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"upgradeConstraintPolicy": "CatalogProvided"}}}}`) - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v2.0"]}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")]}`) - if strings.Contains(message, "error upgrading") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "no error message raised") - - g.By("11) Attempt to update to channel candidate-v2.1 with CatalogProvided policy, that should success") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v2.1"]}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 
3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "2.1.1") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx74108 2.1.1 is not installed") - - g.By("8) downgrade to version 1.0.1 with SelfCertified policy, that should work") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"upgradeConstraintPolicy": "SelfCertified"}}}}`) - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"channels": ["candidate-v1.0"],"version":"1.0.1"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.0.1") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - } - exutil.AssertWaitPollNoErr(errWait, "nginx74108 1.0.1 is not installed") - - }) - - g.It("PolarionID:74923-[OTP][Skipped:Disconnected]no two ClusterExtensions can manage the same underlying object", func() { - var ( - caseID = "74923" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutChannelVersion.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns1 = "ns-74923-1" - ns2 = "ns-74923-2" - sa1 = "sa74923-1" - sa2 = "sa74923-2" - saCrb1 = olmv1util.SaCLusterRolebindingDescription{ - Name: sa1, - Namespace: ns1, - Template: saClusterRoleBindingTemplate, - } - saCrb2 = olmv1util.SaCLusterRolebindingDescription{ - Name: sa2, - Namespace: ns2, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - Name: "clustercatalog-74923-1", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm74923", - LabelValue: labelValue, - Template: clustercatalogTemplate, - } - clusterextension1 = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-74923-1", - PackageName: "nginx74923", - InstallNamespace: ns1, - SaName: sa1, - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - clusterextension2 = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-74923-2", - PackageName: "nginx74923", - InstallNamespace: ns2, - SaName: sa2, - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("1. Create clustercatalog") - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("2. 
Create clusterextension1") - g.By("2.1 Create namespace 1") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns1).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns1)).To(o.BeTrue()) - - g.By("2.2 Create SA for clusterextension1") - defer saCrb1.Delete(oc) - saCrb1.Create(oc) - - g.By("2.3 Create clusterextension1") - defer clusterextension1.Delete(oc) - clusterextension1.Create(oc) - o.Expect(clusterextension1.InstalledBundle).To(o.ContainSubstring("v1.0.2")) - - g.By("3 Create clusterextension2") - g.By("3.1 Create namespace 2") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns2, "--ignore-not-found").Execute() - }() - err = oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns2).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns2)).To(o.BeTrue()) - - g.By("3.2 Create SA for clusterextension2") - defer saCrb2.Delete(oc) - saCrb2.Create(oc) - - g.By("3.3 Create clusterextension2") - defer clusterextension2.Delete(oc) - _ = clusterextension2.CreateWithoutCheck(oc) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension2.Name, "-o", "jsonpath={.status.conditions[*].message}") - if !strings.Contains(message, "already exists in namespace") { - e2e.Logf("status is %s", message) - return false, nil - } - return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "clusterextension2 should not be installed") - clusterextension2.Delete(oc) - clusterextension1.Delete(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - status, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd", "nginxolm74923s.cache.example.com").Output() - if !strings.Contains(status, "NotFound") { - e2e.Logf("crd status: %s", status) - return false, nil - } - return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "crd nginxolm74923s.cache.example.com is not deleted") - - g.By("4 Create crd") - crdFilePath := filepath.Join(baseDir, "crd-nginxolm74923.yaml") - defer func() { - _, _ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("crd", "nginxolm74923s.cache.example.com").Output() - }() - _, _ = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", crdFilePath).Output() - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - status, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd", "nginxolm74923s.cache.example.com").Output() - if strings.Contains(status, "NotFound") { - e2e.Logf("crd status: %s", status) - return false, nil - } - return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "crd nginxolm74923s.cache.example.com is not deleted") - - _ = clusterextension1.CreateWithoutCheck(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension1.Name, "-o", "jsonpath={.status.conditions[*].message}") - if !strings.Contains(message, "already exists in namespace") { - e2e.Logf("status is %s", message) - return false, nil - } - 
return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "clusterextension1 should not be installed") - - }) - - g.It("PolarionID:75501-[OTP][Skipped:Disconnected]the updates of various status fields is orthogonal", func() { - var ( - caseID = "75501" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-75501" - sa = "sa75501" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - Name: "clustercatalog-75501", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm75501", - LabelValue: labelValue, - Template: clustercatalogTemplate, - } - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-75501", - InstallNamespace: ns, - PackageName: "nginx75501", - Channel: "candidate-v2.1", - Version: "2.1.0", - SaName: sa, - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("Create namespace") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - g.By("Create SA for clusterextension") - defer saCrb.Delete(oc) - saCrb.Create(oc) - - g.By("Create clustercatalog") - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("Create clusterextension with channel candidate-v2.1, version 2.1.0") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - reason, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")].reason}`) - o.Expect(reason).To(o.ContainSubstring("Succeeded")) - status, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Installed")].status}`) - o.Expect(status).To(o.ContainSubstring("True")) - reason, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Installed")].reason}`) - o.Expect(reason).To(o.ContainSubstring("Succeeded")) - installedBundleVersion, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.install.bundle.version}`) - o.Expect(installedBundleVersion).To(o.ContainSubstring("2.1.0")) - installedBundleName, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.install.bundle.name}`) - o.Expect(installedBundleName).To(o.ContainSubstring("nginx75501.v2.1.0")) - resolvedBundleVersion, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.install.bundle.version}`) - o.Expect(resolvedBundleVersion).To(o.ContainSubstring("2.1.0")) - resolvedBundleName, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.install.bundle.name}`) - o.Expect(resolvedBundleName).To(o.ContainSubstring("nginx75501.v2.1.0")) - - 
clusterextension.Delete(oc) - - g.By("Test UnpackFailed, bundle image cannot be pulled successfully") - clusterextension.Channel = "candidate-v2.0" - clusterextension.Version = "2.0.0" - _ = clusterextension.CreateWithoutCheck(oc) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - unpackedReason, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")].reason}`) - unpackedMessage, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")].message}`) - if !strings.Contains(unpackedReason, "Retrying") || !strings.Contains(unpackedMessage, "manifest unknown") { - return false, nil - } - return true, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - exutil.AssertWaitPollNoErr(errWait, "clusterextension status is not correct") - } - clusterextension.Delete(oc) - - g.By("Test ResolutionFailed, wrong version") - clusterextension.Version = "3.0.0" - _ = clusterextension.CreateWithoutCheck(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - resolvedReason, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")].reason}`) - resolvedMessage, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")].message}`) - if !strings.Contains(resolvedReason, "Retrying") || !strings.Contains(resolvedMessage, "no bundles found for package") { - return false, nil - } - return true, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - exutil.AssertWaitPollNoErr(errWait, "clusterextension status is not correct") - } - clusterextension.Delete(oc) - - g.By("Test ResolutionFailed, no package") - clusterextension.PackageName = "nginxfake" - _ = clusterextension.CreateWithoutCheck(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - resolvedReason, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")].reason}`) - resolvedMessage, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")].message}`) - if !strings.Contains(resolvedReason, "Retrying") || !strings.Contains(resolvedMessage, "no bundles found for package") { - return false, nil - } - return true, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - exutil.AssertWaitPollNoErr(errWait, "clusterextension status is not correct") - } - - }) - - g.It("PolarionID:76685-[OTP][Skipped:Disconnected]olm v1 supports selecting catalogs [Serial]", func() { - var ( - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextensionWithoutChannelVersion.yaml") - clusterextensionLabelTemplate = filepath.Join(baseDir, 
"clusterextension-withselectorlabel-WithoutChannelVersion.yaml") - clusterextensionExpressionsTemplate = filepath.Join(baseDir, "clusterextension-withselectorExpressions-WithoutChannelVersion.yaml") - clusterextensionLableExpressionsTemplate = filepath.Join(baseDir, "clusterextension-withselectorLableExpressions-WithoutChannelVersion.yaml") - - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-76685" - sa = "sa76685" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog1 = olmv1util.ClusterCatalogDescription{ - LabelKey: "olmv1-test", - LabelValue: "ocp-76685-1", - Name: "clustercatalog-76685-1", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginx76685v1", - Template: clustercatalogTemplate, - } - clustercatalog2 = olmv1util.ClusterCatalogDescription{ - LabelKey: "olmv1-test", - LabelValue: "ocp-76685-2", - Name: "clustercatalog-76685-2", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginx76685v2", - Template: clustercatalogTemplate, - } - clustercatalog3 = olmv1util.ClusterCatalogDescription{ - LabelKey: "olmv1-test", - LabelValue: "ocp-76685-3", - Name: "clustercatalog-76685-3", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginx76685v3", - Template: clustercatalogTemplate, - } - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-76685", - InstallNamespace: ns, - PackageName: "nginx76685", - SaName: sa, - Template: clusterextensionTemplate, - } - ) - - g.By("1) Create namespace, sa, clustercatalog1 and clustercatalog2") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - defer saCrb.Delete(oc) - saCrb.Create(oc) - - defer clustercatalog1.Delete(oc) - clustercatalog1.Create(oc) - defer clustercatalog2.Delete(oc) - clustercatalog2.Create(oc) - - g.By("2) 2 clustercatalogs with same priority, install clusterextension, selector of clusterextension is empty") - defer clusterextension.Delete(oc) - _ = clusterextension.CreateWithoutCheck(oc) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", `jsonpath={.status.conditions[?(@.type=="Progressing")]}`) - if strings.Contains(message, "multiple catalogs with the same priority") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o=jsonpath-as-json={.status}") - } - exutil.AssertWaitPollNoErr(errWait, "no error message raised") - clusterextension.Delete(oc) - - g.By("3) 2 clustercatalogs with same priority, install clusterextension, selector of clusterextension is not empty") - clusterextension.Template = clusterextensionLabelTemplate - clusterextension.LabelKey = "olm.operatorframework.io/metadata.name" - clusterextension.LabelValue = clustercatalog1.Name - clusterextension.Create(oc) - clusterextension.WaitClusterExtensionVersion(oc, "v1.0.1") - clusterextension.Delete(oc) - - g.By("4) Install 2 clustercatalogs with different priorities, and the selector of clusterextension is empty") 
- clustercatalog1.Patch(oc, `{"spec":{"priority": 100}}`) - clustercatalog2.Patch(oc, `{"spec":{"priority": 1000}}`) - clusterextension.Template = clusterextensionTemplate - clusterextension.Create(oc) - clusterextension.WaitClusterExtensionVersion(oc, "v2.0.0") - clusterextension.Delete(oc) - - g.By("5) Install 2 clustercatalogs with different priorities, and the selector of clusterextension is not empty") - clusterextension.Template = clusterextensionLabelTemplate - clusterextension.LabelKey = "olm.operatorframework.io/metadata.name" - clusterextension.LabelValue = clustercatalog1.Name - clusterextension.Create(oc) - clusterextension.WaitClusterExtensionVersion(oc, "v1.0.1") - - g.By("6) add ClusterCatalog 3, and modify the selector of clusterextension to use ClusterCatalog 3") - defer clustercatalog3.Delete(oc) - clustercatalog3.Create(oc) - clusterextension.LabelKey = clustercatalog3.LabelKey - clusterextension.LabelValue = clustercatalog3.LabelValue - clusterextension.Create(oc) - clusterextension.WaitClusterExtensionVersion(oc, "v3.0.0") - clusterextension.Delete(oc) - - g.By("7) matchExpressions") - clusterextension.Template = clusterextensionExpressionsTemplate - clusterextension.ExpressionsKey = clustercatalog3.LabelKey - clusterextension.ExpressionsOperator = "NotIn" - clusterextension.ExpressionsValue1 = clustercatalog3.LabelValue - clusterextension.Create(oc) - clusterextension.WaitClusterExtensionVersion(oc, "v2.0.0") - - g.By("8) test both matchLabels and matchExpressions") - clusterextension.Template = clusterextensionLableExpressionsTemplate - clusterextension.LabelKey = "olm.operatorframework.io/metadata.name" - clusterextension.LabelValue = clustercatalog3.Name - clusterextension.ExpressionsKey = clustercatalog3.LabelKey - clusterextension.ExpressionsOperator = "In" - clusterextension.ExpressionsValue1 = clustercatalog1.LabelValue - clusterextension.ExpressionsValue2 = clustercatalog2.LabelValue - clusterextension.ExpressionsValue3 = clustercatalog3.LabelValue - clusterextension.Create(oc) - clusterextension.WaitClusterExtensionVersion(oc, "v3.0.0") - - }) - - g.It("PolarionID:77972-[OTP][Skipped:Disconnected]olm v1 Supports MaxOCPVersion in properties file", func() { - var ( - caseID = "77972" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutChannel.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-77972" - sa = "sa77972" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - LabelKey: "olmv1-test", - LabelValue: labelValue, - Name: "clustercatalog-77972", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm77972", - Template: clustercatalogTemplate, - } - - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-77972", - InstallNamespace: ns, - PackageName: "nginx77972", - SaName: sa, - Version: "0.0.1", - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("1) Create namespace, sa, clustercatalog1 and clustercatalog2") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - 
o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - defer saCrb.Delete(oc) - saCrb.Create(oc) - - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("2) install clusterextension, version 0.0.1, without setting olm.maxOpenShiftVersion") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v0.0.1")) - status, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`) - o.Expect(status).To(o.ContainSubstring("True")) - message, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].message}`) - o.Expect(message).To(o.ContainSubstring("All is well")) - - g.By("3) upgrade clusterextension to 1.1.0, olm.maxOpenShiftVersion is 4.19") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"1.1.0"}}}}`) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].message}`) - if strings.Contains(message, "InstalledOLMOperatorsUpgradeable") && strings.Contains(message, "nginx77972.v1.1.0") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - status, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`) - o.Expect(status).To(o.ContainSubstring("False")) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o=jsonpath-as-json={.status.conditions}") - } - exutil.AssertWaitPollNoErr(errWait, "Upgradeable message is not correct") - - g.By("4) upgrade clusterextension to 1.2.0, olm.maxOpenShiftVersion is 4.20") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"1.2.0"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].message}`) - if strings.Contains(message, "InstalledOLMOperatorsUpgradeable") && strings.Contains(message, "nginx77972.v1.2.0") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - status, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`) - o.Expect(status).To(o.ContainSubstring("False")) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o=jsonpath-as-json={.status.conditions}") - } - exutil.AssertWaitPollNoErr(errWait, "Upgradeable message is not correct") - }) - - g.It("PolarionID:82249-[OTP][Skipped:Disconnected]Verify olmv1 support for float type maxOCPVersion in properties file", func() { - var ( - caseID = "82249" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutChannel.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-82249" - sa = "sa82249" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - 
LabelKey: "olmv1-test", - Name: "clustercatalog-82249", - LabelValue: labelValue, - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm82249", - Template: clustercatalogTemplate, - } - - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-82249", - InstallNamespace: ns, - PackageName: "nginx82249", - SaName: sa, - Version: "0.0.1", - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("1) Create namespace, sa, clustercatalog") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - defer saCrb.Delete(oc) - saCrb.Create(oc) - - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("2) install clusterextension, version 0.0.1, without setting olm.maxOpenShiftVersion") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v0.0.1")) - status, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`) - o.Expect(status).To(o.ContainSubstring("True")) - message, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].message}`) - o.Expect(message).To(o.ContainSubstring("All is well")) - - g.By("3) upgrade clusterextension to 1.2.0, olm.maxOpenShiftVersion is 4.20") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"1.2.0"}}}}`) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].message}`) - if strings.Contains(message, "InstalledOLMOperatorsUpgradeable") && strings.Contains(message, "nginx82249.v1.2.0") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - status, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`) - o.Expect(status).To(o.ContainSubstring("False")) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o=jsonpath-as-json={.status.conditions}") - } - exutil.AssertWaitPollNoErr(errWait, "Upgradeable message is not correct") - - g.By("4) upgrade clusterextension to 1.3.0, olm.maxOpenShiftVersion is 4.21") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"1.3.0"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].message}`) - if strings.Contains(message, "InstalledOLMOperatorsUpgradeable") && strings.Contains(message, "nginx82249.v1.3.0") { - e2e.Logf("status is %s", message) - return true, nil - } - return false, nil - }) - status, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o", `jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`) - o.Expect(status).To(o.ContainSubstring("False")) - if errWait != nil { - _, _ = olmv1util.GetNoEmpty(oc, "co", "olm", "-o=jsonpath-as-json={.status.conditions}") - } - exutil.AssertWaitPollNoErr(errWait, "Upgradeable message is not correct") - - g.By("5) Test 
PASS") - - }) - - g.It("PolarionID:80117-[OTP][Skipped:Disconnected] Single Namespace Install Mode should be supported", func() { - if !olmv1util.IsFeaturegateEnabled(oc, "NewOLMOwnSingleNamespace") { - g.Skip("SingleOwnNamespaceInstallSupport is not enable, so skip it") - } - var ( - caseID = "80117" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionOwnSingleTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutChannel.yaml") - - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-80117" - nsWatch = "ns-80117-watch" - sa = "sa80117" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - LabelKey: "olmv1-test", - LabelValue: labelValue, - Name: "clustercatalog-80117", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm80117", - Template: clustercatalogTemplate, - } - - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-80117", - InstallNamespace: ns, - PackageName: "nginx80117", - SaName: sa, - Version: "1.0.1", - WatchNamespace: nsWatch, - LabelValue: labelValue, - Template: clusterextensionOwnSingleTemplate, - } - clusterextensionAllNs = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-80117", - InstallNamespace: ns, - PackageName: "nginx80117", - SaName: sa, - Version: "1.1.0", - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("1) Create namespace, sa, clustercatalog") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - defer saCrb.Delete(oc) - saCrb.Create(oc) - - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("2) install clusterextension, version 1.0.1, without creating watch ns") - defer clusterextension.Delete(oc) - _ = clusterextension.CreateWithoutCheck(oc) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", "jsonpath={.status.conditions[*].message}") - if !strings.Contains(message, "failed to create resource") { - e2e.Logf("status is %s", message) - return false, nil - } - return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "status is not correct") - clusterextension.Delete(oc) - - g.By("3) create watch ns") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", nsWatch, "--ignore-not-found").Execute() - }() - err = oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", nsWatch).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", nsWatch)).To(o.BeTrue()) - - g.By("4) create clusterextension, version 1.0.1") - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v1.0.1")) - - g.By("4.1) check deployment") - deploymentTargetNS, _ := olmv1util.GetNoEmpty(oc, "deployment", 
"nginx80117-controller-manager", "-n", ns, "-o", `jsonpath={.spec.template.metadata.annotations.olm\.targetNamespaces}`) - o.Expect(deploymentTargetNS).To(o.ContainSubstring(nsWatch)) - g.By("4.2) check rolebinding") - rdNS, _ := olmv1util.GetNoEmpty(oc, "rolebinding", "-l", "olm.operatorframework.io/owner-name="+clusterextension.Name, "-n", nsWatch, "-o", `jsonpath={..subjects[].namespace}`) - o.Expect(rdNS).To(o.ContainSubstring(ns)) - - g.By("5) upgrade clusterextension to 1.0.2, v1.0.2 only support singleNamespace") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"1.0.2"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.0.2") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx80117 1.0.2 is not installed") - g.By("5.1) check deployment") - deploymentTargetNS, _ = olmv1util.GetNoEmpty(oc, "deployment", "nginx80117-controller-manager", "-n", ns, "-o", `jsonpath={.spec.template.metadata.annotations.olm\.targetNamespaces}`) - o.Expect(deploymentTargetNS).To(o.ContainSubstring(nsWatch)) - g.By("5.2) check rolebinding") - rdNS, _ = olmv1util.GetNoEmpty(oc, "rolebinding", "-l", "olm.operatorframework.io/owner-name="+clusterextension.Name, "-n", nsWatch, "-o", `jsonpath={..subjects[].namespace}`) - o.Expect(rdNS).To(o.ContainSubstring(ns)) - - g.By("6) upgrade clusterextension to 1.1.0, support allnamespace") - clusterextensionAllNs.Create(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.1.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx80117 1.1.0 is not installed") - g.By("6.1) check deployment") - deploymentTargetNS, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "nginx80117-controller-manager", "-n", ns, "-o", `jsonpath={.spec.template.metadata.annotations.olm\.targetNamespaces}`).Output() - o.Expect(deploymentTargetNS).To(o.BeEmpty()) - g.By("6.2) check rolebinding") - rdNS, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("rolebinding", "-l", "olm.operatorframework.io/owner-name="+clusterextension.Name, "-n", nsWatch).Output() - o.Expect(rdNS).To(o.ContainSubstring("No resources found")) - - g.By("7) upgrade clusterextension to 2.0.0, support singleNamespace") - clusterextension.Version = "2.0.0" - clusterextension.Create(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "2.0.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx80117 2.0.0 is not installed") - g.By("7.1) check deployment") - deploymentTargetNS, _ = olmv1util.GetNoEmpty(oc, "deployment", "nginx80117-controller-manager", "-n", ns, "-o", `jsonpath={.spec.template.metadata.annotations.olm\.targetNamespaces}`) - o.Expect(deploymentTargetNS).To(o.ContainSubstring(nsWatch)) - 
g.By("7.2) check rolebinding") - rdNS, _ = olmv1util.GetNoEmpty(oc, "rolebinding", "-l", "olm.operatorframework.io/owner-name="+clusterextension.Name, "-n", nsWatch, "-o", `jsonpath={..subjects[].namespace}`) - o.Expect(rdNS).To(o.ContainSubstring(ns)) - - g.By("8) check not support install two same clusterextensions") - ns2 := ns + "-2" - nsWatch2 := nsWatch + "-2" - sa2 := "sa80117-2" - saCrb2 := olmv1util.SaCLusterRolebindingDescription{ - Name: sa2, - Namespace: ns2, - Template: saClusterRoleBindingTemplate, - } - - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns2, "--ignore-not-found").Execute() - }() - err = oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns2).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns2)).To(o.BeTrue()) - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", nsWatch2, "--ignore-not-found").Execute() - }() - err = oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", nsWatch2).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", nsWatch2)).To(o.BeTrue()) - - defer saCrb2.Delete(oc) - saCrb2.Create(oc) - clusterextension2 := olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-80117-2", - InstallNamespace: ns2, - PackageName: "nginx80117", - SaName: sa2, - Version: "1.0.1", - WatchNamespace: nsWatch2, - LabelKey: "olmv1-test", - LabelValue: labelValue, - Template: clusterextensionOwnSingleTemplate, - } - defer clusterextension2.Delete(oc) - _ = clusterextension2.CreateWithoutCheck(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension2.Name, "-o", "jsonpath={.status.conditions[*].message}") - if !strings.Contains(message, "already exists") { - e2e.Logf("status is %s", message) - return false, nil - } - return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "status is not correct") - - g.By("9) Test SUCCESS") - - }) - - g.It("PolarionID:80120-[OTP][Skipped:Disconnected] Own Namespace Install Mode should be supported", func() { - if !olmv1util.IsFeaturegateEnabled(oc, "NewOLMOwnSingleNamespace") { - g.Skip("SingleOwnNamespaceInstallSupport is not enable, so skip it") - } - var ( - caseID = "80120" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionOwnSingleTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutChannel.yaml") - - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-80120" - sa = "sa80120" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - LabelKey: "olmv1-test", - LabelValue: labelValue, - Name: "clustercatalog-80120", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm80120", - Template: clustercatalogTemplate, - } - - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-80120", - InstallNamespace: ns, - PackageName: "nginx80120", - SaName: sa, - Version: "1.0.1", - LabelKey: "olmv1-test", - LabelValue: labelValue, - 
WatchNamespace: ns, - Template: clusterextensionOwnSingleTemplate, - } - clusterextensionAllNs = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-80120", - InstallNamespace: ns, - PackageName: "nginx80120", - SaName: sa, - Version: "3.0.0", - LabelKey: "olmv1-test", - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("1) Create namespace, sa, clustercatalog") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - defer saCrb.Delete(oc) - saCrb.Create(oc) - - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("2) install clusterextension, version 1.0.1") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("v1.0.1")) - g.By("2.1) check deployment") - deploymentTargetNS, _ := olmv1util.GetNoEmpty(oc, "deployment", "nginx80120-controller-manager", "-n", ns, "-o", `jsonpath={.spec.template.metadata.annotations.olm\.targetNamespaces}`) - o.Expect(deploymentTargetNS).To(o.ContainSubstring(ns)) - g.By("2.2) check rolebinding") - rdNS, _ := olmv1util.GetNoEmpty(oc, "rolebinding", "-l", "olm.operatorframework.io/owner-name="+clusterextension.Name, "-n", ns, "-o", `jsonpath={..subjects[].namespace}`) - o.Expect(rdNS).To(o.ContainSubstring(ns)) - - g.By("3) upgrade clusterextension to 1.0.2, v1.0.2 only support OwnNamespace") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"1.0.2"}}}}`) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.0.2") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx80120 1.0.2 is not installed") - - g.By("4) upgrade clusterextension to 3.0.0, support allnamespace") - clusterextensionAllNs.Create(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "3.0.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx80120 3.0.0 is not installed") - g.By("4.1) check deployment") - deploymentTargetNS, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "nginx80120-controller-manager", "-n", ns, "-o", `jsonpath={.spec.template.metadata.annotations.olm\.targetNamespaces}`).Output() - o.Expect(deploymentTargetNS).To(o.BeEmpty()) - g.By("4.2) check rolebinding") - rdNS, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("rolebinding", "-l", "olm.operatorframework.io/owner-name="+clusterextension.Name, "-n", ns).Output() - o.Expect(rdNS).To(o.ContainSubstring("No resources found")) - - g.By("5) upgrade clusterextension to 4.0.0, support OwnNamespace") - clusterextension.Version = "4.0.0" - clusterextension.Create(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - 
clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "4.0.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx80120 4.0.0 is not installed") - g.By("5.1) check deployment") - deploymentTargetNS, _ = olmv1util.GetNoEmpty(oc, "deployment", "nginx80120-controller-manager", "-n", ns, "-o", `jsonpath={.spec.template.metadata.annotations.olm\.targetNamespaces}`) - o.Expect(deploymentTargetNS).To(o.ContainSubstring(ns)) - g.By("5.2) check rolebinding") - rdNS, _ = olmv1util.GetNoEmpty(oc, "rolebinding", "-l", "olm.operatorframework.io/owner-name="+clusterextension.Name, "-n", ns, "-o", `jsonpath={..subjects[].namespace}`) - o.Expect(rdNS).To(o.ContainSubstring(ns)) - - g.By("6) if the annotations is not correct, error should be raised") - clusterextension.Delete(oc) - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-80120", - InstallNamespace: ns, - PackageName: "nginx80120", - SaName: sa, - Version: "1.0.1", - WatchNamespace: ns + "flake", - LabelKey: "olmv1-test", - LabelValue: labelValue, - Template: clusterextensionOwnSingleTemplate, - } - _ = clusterextension.CreateWithoutCheck(oc) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", "jsonpath={.status.conditions[*].message}") - if !strings.Contains(message, "invalid configuration") { - e2e.Logf("status is %s", message) - return false, nil - } - return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx80120 status is not correct") - - g.By("7) Test SUCCESS") - - }) - - g.It("PolarionID:82136-[OTP][Skipped:Disconnected]olm v1 supports NetworkPolicy resources", func() { - var ( - caseID = "82136" - labelValue = caseID - baseDir = exutil.FixturePath("testdata", "olm") - clustercatalogTemplate = filepath.Join(baseDir, "clustercatalog-withlabel.yaml") - clusterextensionTemplate = filepath.Join(baseDir, "clusterextension-withselectorlabel-WithoutChannel.yaml") - saClusterRoleBindingTemplate = filepath.Join(baseDir, "sa-admin.yaml") - ns = "ns-82136" - sa = "sa82136" - saCrb = olmv1util.SaCLusterRolebindingDescription{ - Name: sa, - Namespace: ns, - Template: saClusterRoleBindingTemplate, - } - clustercatalog = olmv1util.ClusterCatalogDescription{ - LabelKey: "olmv1-test", - LabelValue: labelValue, - Name: "clustercatalog-82136", - Imageref: "quay.io/openshifttest/nginxolm-operator-index:nginxolm82136", - Template: clustercatalogTemplate, - } - clusterextension = olmv1util.ClusterExtensionDescription{ - Name: "clusterextension-82136", - InstallNamespace: ns, - PackageName: "nginx82136", - Version: "1.0.1", - SaName: sa, - LabelKey: "olmv1-test", - LabelValue: labelValue, - Template: clusterextensionTemplate, - } - ) - - g.By("Create namespace") - defer func() { - _ = oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns, "--ignore-not-found").Execute() - }() - err := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", ns).Execute() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(olmv1util.Appearance(oc, exutil.Appear, "ns", ns)).To(o.BeTrue()) - - g.By("Create SA for clusterextension") - defer saCrb.Delete(oc) - saCrb.Create(oc) - - g.By("1) Create clustercatalog") - defer clustercatalog.Delete(oc) - clustercatalog.Create(oc) - - g.By("2) Installnginx82136.v1.0.1, 
no networkpolicy") - defer clusterextension.Delete(oc) - clusterextension.Create(oc) - o.Expect(clusterextension.InstalledBundle).To(o.ContainSubstring("1.0.1")) - networkpolicies, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(networkpolicies).To(o.ContainSubstring("No resources found")) - - g.By("3) upgrade to nginx82136.v1.1.0, 1 networkpolicy, allow all ingress and all egress traffic") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"1.1.0"}}}}`) - errWait := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "1.1.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx82136 1.1.0 is not installed") - networkpolicies, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(networkpolicies).To(o.ContainSubstring("nginx82136-controller-acceptall")) - - g.By("4) upgrade to nginx82136.v2.0.0, 2 networkpolicy, one default deny all traffic, one for controller-manager") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"2.0.0"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "2.0.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx82136 2.2.0 is not installed") - networkpolicies, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(networkpolicies).To(o.ContainSubstring("default-deny-all")) - o.Expect(networkpolicies).To(o.ContainSubstring("nginx82136-controller")) - o.Expect(networkpolicies).NotTo(o.ContainSubstring("nginx82136-controller-acceptall")) - - g.By("5) upgrade to nginx82136.v2.1.0, wrong networkpolicy") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"2.1.0"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - message, _ := olmv1util.GetNoEmpty(oc, "clusterextension", clusterextension.Name, "-o", "jsonpath={.status.conditions[*].message}") - if !strings.Contains(message, "Unsupported value") { - e2e.Logf("status is %s", message) - return false, nil - } - return true, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx82136.v2.1.0 should not be installed, wrong error message") - - g.By("6) upgrade to nginx82136.v2.2.0, no networkpolicy") - clusterextension.Patch(oc, `{"spec":{"source":{"catalog":{"version":"2.2.0"}}}}`) - errWait = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 150*time.Second, false, func(ctx context.Context) (bool, error) { - clusterextension.GetBundleResource(oc) - if strings.Contains(clusterextension.InstalledBundle, "2.2.0") { - e2e.Logf("InstalledBundle is %s", clusterextension.InstalledBundle) - return true, nil - } - return false, nil - }) - exutil.AssertWaitPollNoErr(errWait, "nginx82136 2.2.0 is not installed") - networkpolicies, err = 
oc.WithoutNamespace().AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output() - o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(networkpolicies).To(o.ContainSubstring("No resources found")) - - g.By("7) Test SUCCESS") - }) - }) diff --git a/openshift/tests-extension/test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutVersion.yaml b/openshift/tests-extension/test/qe/testdata/olm/clusterextension-withoutChannel-OwnSingle.yaml similarity index 71% rename from openshift/tests-extension/test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutVersion.yaml rename to openshift/tests-extension/test/qe/testdata/olm/clusterextension-withoutChannel-OwnSingle.yaml index fcccd8e72..757dd2766 100644 --- a/openshift/tests-extension/test/qe/testdata/olm/clusterextension-withselectorlabel-WithoutVersion.yaml +++ b/openshift/tests-extension/test/qe/testdata/olm/clusterextension-withoutChannel-OwnSingle.yaml @@ -7,6 +7,8 @@ objects: kind: ClusterExtension metadata: name: "${NAME}" + annotations: + olm.operatorframework.io/watch-namespace: "${WATCHNS}" spec: namespace: "${INSTALLNAMESPACE}" serviceAccount: @@ -15,23 +17,17 @@ objects: sourceType: "${SOURCETYPE}" catalog: packageName: "${PACKAGE}" - channels: - - "${CHANNEL}" - selector: - matchLabels: - "${LABELKEY}": "${LABELVALUE}" + version: "${VERSION}" upgradeConstraintPolicy: "${POLICY}" parameters: - name: NAME - name: INSTALLNAMESPACE +- name: WATCHNS - name: PACKAGE -- name: CHANNEL +- name: VERSION - name: SANAME - name: POLICY value: "CatalogProvided" -- name: LABELVALUE - # suggest to use case id -- name: LABELKEY - value: "olmv1-test" - name: SOURCETYPE value: "Catalog" + diff --git a/openshift/tests-extension/test/qe/testdata/olm/clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml b/openshift/tests-extension/test/qe/testdata/olm/clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml deleted file mode 100644 index 7a7dec1ec..000000000 --- a/openshift/tests-extension/test/qe/testdata/olm/clusterextension-withselectorlabel-withoutChannel-OwnSingle.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: operator-template -objects: -- apiVersion: olm.operatorframework.io/v1 - kind: ClusterExtension - metadata: - name: "${NAME}" - spec: - namespace: "${INSTALLNAMESPACE}" - serviceAccount: - name: "${SANAME}" - config: - configType: Inline - inline: - watchNamespace: "${WATCHNS}" - source: - sourceType: "${SOURCETYPE}" - catalog: - packageName: "${PACKAGE}" - version: "${VERSION}" - selector: - matchLabels: - "${LABELKEY}": "${LABELVALUE}" - upgradeConstraintPolicy: "${POLICY}" -parameters: -- name: NAME -- name: INSTALLNAMESPACE -- name: WATCHNS -- name: PACKAGE -- name: VERSION -- name: SANAME -- name: POLICY - value: "CatalogProvided" -- name: SOURCETYPE - value: "Catalog" -- name: LABELVALUE - # suggest to use case id -- name: LABELKEY - value: "olmv1-test" - - - - From b67765dc18dc80f5610405342e4cc73c258e4ec3 Mon Sep 17 00:00:00 2001 From: Forrest Babcock Date: Fri, 5 Dec 2025 14:29:06 -0500 Subject: [PATCH 6/6] Revert "Merge pull request #566 from openshift-bot/synchronize-upstream" This reverts commit 6f593c0498793d01079cffc4423291d4c1103168, reversing changes made to 2b8b056fd89e1ebc0f8c6605a3627a3f0a4e1987. 
--- Makefile | 67 ++------ commitchecker.yaml | 2 +- go.mod | 12 +- go.sum | 24 +-- .../stargz-snapshotter/estargz/build.go | 111 +++---------- .../stargz-snapshotter/estargz/estargz.go | 9 - .../stargz-snapshotter/estargz/testutil.go | 155 +++++++----------- .../go-containerregistry/pkg/crane/copy.go | 6 +- .../pkg/legacy/tarball/write.go | 2 +- .../go-containerregistry/pkg/v1/config.go | 9 +- .../pkg/v1/mutate/mutate.go | 2 +- .../pkg/v1/remote/options.go | 9 +- .../pkg/v1/remote/transport/error.go | 6 +- .../vbatts/tar-split/archive/tar/common.go | 1 - .../vbatts/tar-split/archive/tar/reader.go | 9 +- vendor/golang.org/x/oauth2/deviceauth.go | 31 +--- vendor/golang.org/x/oauth2/oauth2.go | 2 +- vendor/golang.org/x/oauth2/pkce.go | 2 +- vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/oauth2/transport.go | 2 +- vendor/modules.txt | 16 +- 21 files changed, 147 insertions(+), 332 deletions(-) diff --git a/Makefile b/Makefile index 17025fd9f..388471065 100644 --- a/Makefile +++ b/Makefile @@ -306,7 +306,6 @@ test-extension-developer-e2e: run-internal image-registry extension-developer-e2 .PHONY: run-latest-release run-latest-release: - @echo -e "\n\U23EC Using $(RELEASE_INSTALL) as release installer\n" curl -L -s https://github.com/operator-framework/operator-controller/releases/latest/download/$(notdir $(RELEASE_INSTALL)) | bash -s .PHONY: pre-upgrade-setup @@ -320,41 +319,24 @@ post-upgrade-checks: TEST_UPGRADE_E2E_TASKS := kind-cluster run-latest-release image-registry pre-upgrade-setup docker-build kind-load kind-deploy post-upgrade-checks kind-clean -.PHONY: test-upgrade-st2st-e2e -test-upgrade-st2st-e2e: SOURCE_MANIFEST := $(STANDARD_MANIFEST) -test-upgrade-st2st-e2e: RELEASE_INSTALL := $(STANDARD_RELEASE_INSTALL) -test-upgrade-st2st-e2e: KIND_CLUSTER_NAME := operator-controller-upgrade-st2st-e2e -test-upgrade-st2st-e2e: export MANIFEST := $(STANDARD_RELEASE_MANIFEST) -test-upgrade-st2st-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog -test-upgrade-st2st-e2e: export TEST_CLUSTER_EXTENSION_NAME := test-package -test-upgrade-st2st-e2e: $(TEST_UPGRADE_E2E_TASKS) #HELP Run upgrade (standard -> standard) e2e tests on a local kind cluster - -.PHONY: test-upgrade-ex2ex-e2e -test-upgrade-ex2ex-e2e: SOURCE_MANIFEST := $(EXPERIMENTAL_MANIFEST) -test-upgrade-ex2ex-e2e: RELEASE_INSTALL := $(EXPERIMENTAL_RELEASE_INSTALL) -test-upgrade-ex2ex-e2e: KIND_CLUSTER_NAME := operator-controller-upgrade-ex2ex-e2e -test-upgrade-ex2ex-e2e: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST) -test-upgrade-ex2ex-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog -test-upgrade-ex2ex-e2e: export TEST_CLUSTER_EXTENSION_NAME := test-package -test-upgrade-ex2ex-e2e: $(TEST_UPGRADE_E2E_TASKS) #HELP Run upgrade (experimental -> experimental) e2e tests on a local kind cluster - -.PHONY: test-upgrade-st2ex-e2e -test-upgrade-st2ex-e2e: SOURCE_MANIFEST := $(EXPERIMENTAL_MANIFEST) -test-upgrade-st2ex-e2e: RELEASE_INSTALL := $(STANDARD_RELEASE_INSTALL) -test-upgrade-st2ex-e2e: KIND_CLUSTER_NAME := operator-controller-upgrade-st2ex-e2e -test-upgrade-st2ex-e2e: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST) -test-upgrade-st2ex-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog -test-upgrade-st2ex-e2e: export TEST_CLUSTER_EXTENSION_NAME := test-package -test-upgrade-st2ex-e2e: $(TEST_UPGRADE_E2E_TASKS) #HELP Run upgrade (standard -> experimental) e2e tests on a local kind cluster - -.PHONY: test-st2ex-e2e -test-st2ex-e2e: SOURCE_MANIFEST := $(STANDARD_MANIFEST) -test-st2ex-e2e: RELEASE_INSTALL := 
$(STANDARD_RELEASE_INSTALL) -test-st2ex-e2e: KIND_CLUSTER_NAME := operator-controller-st2ex-e2e -test-st2ex-e2e: export MANIFEST := $(STANDARD_RELEASE_MANIFEST) -test-st2ex-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog -test-st2ex-e2e: export TEST_CLUSTER_EXTENSION_NAME := test-package -test-st2ex-e2e: run-internal image-registry pre-upgrade-setup kind-deploy-experimental post-upgrade-checks kind-clean #HELP Run swichover (standard -> experimental) e2e tests on a local kind cluster +.PHONY: test-upgrade-e2e +test-upgrade-e2e: SOURCE_MANIFEST := $(STANDARD_MANIFEST) +test-upgrade-e2e: RELEASE_INSTALL := $(STANDARD_RELEASE_INSTALL) +test-upgrade-e2e: KIND_CLUSTER_NAME := operator-controller-upgrade-e2e +test-upgrade-e2e: export MANIFEST := $(STANDARD_RELEASE_MANIFEST) +test-upgrade-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog +test-upgrade-e2e: export TEST_CLUSTER_EXTENSION_NAME := test-package +test-upgrade-e2e: $(TEST_UPGRADE_E2E_TASKS) #HELP Run upgrade e2e tests on a local kind cluster + +.PHONY: test-upgrade-experimental-e2e +test-upgrade-experimental-e2e: SOURCE_MANIFEST := $(EXPERIMENTAL_MANIFEST) +test-upgrade-experimental-e2e: RELEASE_INSTALL := $(EXPERIMENTAL_RELEASE_INSTALL) +test-upgrade-experimental-e2e: KIND_CLUSTER_NAME := operator-controller-upgrade-experimental-e2e +test-upgrade-experimental-e2e: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST) +test-upgrade-experimental-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog +test-upgrade-experimental-e2e: export TEST_CLUSTER_EXTENSION_NAME := test-package +test-upgrade-experimental-e2e: $(TEST_UPGRADE_E2E_TASKS) #HELP Run upgrade e2e tests on a local kind cluster + .PHONY: e2e-coverage e2e-coverage: @@ -396,18 +378,6 @@ kind-deploy: manifests cp $(CATALOGS_MANIFEST) $(DEFAULT_CATALOG) envsubst '$$DEFAULT_CATALOG,$$CERT_MGR_VERSION,$$INSTALL_DEFAULT_CATALOGS,$$MANIFEST' < scripts/install.tpl.sh | bash -s -.PHONY: kind-deploy-experimental -kind-deploy-experimental: export DEFAULT_CATALOG := $(RELEASE_CATALOGS) -kind-deploy-experimental: SOURCE_MANIFEST := $(EXPERIMENTAL_MANIFEST) -kind-deploy-experimental: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST) -kind-deploy-experimental: NAMESPACE := olmv1-system -# Have to be a _completely_ separate recipe, rather than having `kind-deploy` as a dependency, because `make` will think it was already built -kind-deploy-experimental: manifests - @echo -e "\n\U1F4D8 Using $(SOURCE_MANIFEST) as source manifest\n" - sed "s/cert-git-version/cert-$(VERSION)/g" $(SOURCE_MANIFEST) > $(MANIFEST) - cp $(CATALOGS_MANIFEST) $(DEFAULT_CATALOG) - envsubst '$$DEFAULT_CATALOG,$$CERT_MGR_VERSION,$$INSTALL_DEFAULT_CATALOGS,$$MANIFEST' < scripts/install.tpl.sh | bash -s - .PHONY: kind-cluster kind-cluster: $(KIND) kind-verify-versions #EXHELP Standup a kind cluster. -$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) @@ -488,7 +458,6 @@ run-experimental: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST) run-experimental: run-internal #HELP Build the operator-controller then deploy it with the experimental manifest into a new kind cluster. 
CATD_NAMESPACE := olmv1-system -.PHONY: wait wait: kubectl wait --for=condition=Available --namespace=$(CATD_NAMESPACE) deployment/catalogd-controller-manager --timeout=60s kubectl wait --for=condition=Ready --namespace=$(CATD_NAMESPACE) certificate/catalogd-service-cert # Avoid upgrade test flakes when reissuing cert diff --git a/commitchecker.yaml b/commitchecker.yaml index 0b1a24c57..8f76d2754 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: 045989d84a7570b1cfddeee47eae64d47245aff2 +expectedMergeBase: d243e10e76b37e3b8d65e6e686044c21eef047a4 upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/go.mod b/go.mod index b94dd3369..6d5cb1703 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/go-logr/logr v1.4.3 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/google/go-cmp v0.7.0 - github.com/google/go-containerregistry v0.20.7 + github.com/google/go-containerregistry v0.20.6 github.com/google/renameio/v2 v2.0.1 github.com/gorilla/handlers v1.5.2 github.com/klauspost/compress v1.18.1 @@ -81,7 +81,7 @@ require ( github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.17.0 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect @@ -90,9 +90,9 @@ require ( github.com/cyphar/filepath-securejoin v0.6.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/cli v29.0.0+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/docker v28.5.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.4 // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -206,7 +206,7 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.15 // indirect - github.com/vbatts/tar-split v0.12.2 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect github.com/vbauerster/mpb/v8 v8.10.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect @@ -227,7 +227,7 @@ require ( go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.45.0 // indirect golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/term v0.37.0 // indirect golang.org/x/text v0.31.0 // indirect diff --git a/go.sum b/go.sum index 1db7e0516..099311074 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/stargz-snapshotter/estargz 
v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= -github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/containerd/stargz-snapshotter/estargz v0.17.0 h1:+TyQIsR/zSFI1Rm31EQBwpAA1ovYgIKHy7kctL3sLcE= +github.com/containerd/stargz-snapshotter/estargz v0.17.0/go.mod h1:s06tWAiJcXQo9/8AReBCIo/QxcXFZ2n4qfsRnpl71SM= github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= @@ -104,12 +104,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= -github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.0.0+incompatible h1:KgsN2RUFMNM8wChxryicn4p46BdQWpXOA1XLGBGPGAw= +github.com/docker/cli v29.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= -github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= +github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= @@ -241,8 +241,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= -github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -484,8 +484,8 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ulikunitz/xz v0.5.15 
h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -613,8 +613,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index a9e1b72ba..8b804b7dd 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -35,7 +35,6 @@ import ( "runtime" "strings" "sync" - "sync/atomic" "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" @@ -43,8 +42,6 @@ import ( "golang.org/x/sync/errgroup" ) -type GzipHelperFunc func(io.Reader) (io.ReadCloser, error) - type options struct { chunkSize int compressionLevel int @@ -53,7 +50,6 @@ type options struct { compression Compression ctx context.Context minChunkSize int - gzipHelperFunc GzipHelperFunc } type Option func(o *options) error @@ -131,25 +127,11 @@ func WithMinChunkSize(minChunkSize int) Option { } } -// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers. -// When a gzip-compressed layer is detected, this function will be used instead of the -// Go standard library gzip decompression for better performance. -// The function should take an io.Reader as input and return an io.ReadCloser. -// If nil, the Go standard library gzip.NewReader will be used. -func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option { - return func(o *options) error { - o.gzipHelperFunc = gzipHelperFunc - return nil - } -} - // Blob is an eStargz blob. type Blob struct { io.ReadCloser - diffID digest.Digester - tocDigest digest.Digest - readCompleted *atomic.Bool - uncompressedSize *atomic.Int64 + diffID digest.Digester + tocDigest digest.Digest } // DiffID returns the digest of uncompressed blob. 
@@ -163,19 +145,6 @@ func (b *Blob) TOCDigest() digest.Digest { return b.tocDigest } -// UncompressedSize returns the size of uncompressed blob. -// UncompressedSize should only be called after the blob has been fully read. -func (b *Blob) UncompressedSize() (int64, error) { - switch { - case b.uncompressedSize == nil || b.readCompleted == nil: - return -1, fmt.Errorf("readCompleted or uncompressedSize is not initialized") - case !b.readCompleted.Load(): - return -1, fmt.Errorf("called UncompressedSize before the blob has been fully read") - default: - return b.uncompressedSize.Load(), nil - } -} - // Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd // or plain tar) passed through the argument. If there are some prioritized files are listed in // the option, these files are grouped as "prioritized" and can be used for runtime optimization @@ -217,7 +186,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) } }() - tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc) + tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { return nil, err } @@ -283,28 +252,17 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { } diffID := digest.Canonical.Digester() pr, pw := io.Pipe() - readCompleted := new(atomic.Bool) - uncompressedSize := new(atomic.Int64) go func() { - var size int64 - var decompressFunc func(io.Reader) (io.ReadCloser, error) - if _, ok := opts.compression.(*gzipCompression); ok && opts.gzipHelperFunc != nil { - decompressFunc = opts.gzipHelperFunc - } else { - decompressFunc = opts.compression.Reader - } - decompressR, err := decompressFunc(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) if err != nil { pw.CloseWithError(err) return } - defer decompressR.Close() - if size, err = io.Copy(diffID.Hash(), decompressR); err != nil { + defer r.Close() + if _, err := io.Copy(diffID.Hash(), r); err != nil { pw.CloseWithError(err) return } - uncompressedSize.Store(size) - readCompleted.Store(true) pw.Close() }() return &Blob{ @@ -312,10 +270,8 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { Reader: pr, closeFunc: layerFiles.CleanupAll, }, - tocDigest: tocDgst, - diffID: diffID, - readCompleted: readCompleted, - uncompressedSize: uncompressedSize, + tocDigest: tocDgst, + diffID: diffID, }, nil } @@ -410,9 +366,8 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri // Sort the tar file respecting to the prioritized files list. sorted := &tarFile{} - picked := make(map[string]struct{}) for _, l := range prioritized { - if err := moveRec(l, intar, sorted, picked); err != nil { + if err := moveRec(l, intar, sorted); err != nil { if errors.Is(err, errNotFound) && missedPrioritized != nil { *missedPrioritized = append(*missedPrioritized, l) continue // allow not found @@ -440,8 +395,8 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri }) } - // Dump prioritized entries followed by the rest entries while skipping picked ones. - return append(sorted.dump(nil), intar.dump(picked)...), nil + // Dump all entry and concatinate them. 
+ return append(sorted.dump(), intar.dump()...), nil } // readerFromEntries returns a reader of tar archive that contains entries passed @@ -503,42 +458,36 @@ func importTar(in io.ReaderAt) (*tarFile, error) { return tf, nil } -func moveRec(name string, in *tarFile, out *tarFile, picked map[string]struct{}) error { +func moveRec(name string, in *tarFile, out *tarFile) error { name = cleanEntryName(name) if name == "" { // root directory. stop recursion. if e, ok := in.get(name); ok { // entry of the root directory exists. we should move it as well. // this case will occur if tar entries are prefixed with "./", "/", etc. - if _, done := picked[name]; !done { - out.add(e) - picked[name] = struct{}{} - } + out.add(e) + in.remove(name) } return nil } _, okIn := in.get(name) _, okOut := out.get(name) - _, okPicked := picked[name] - if !okIn && !okOut && !okPicked { + if !okIn && !okOut { return fmt.Errorf("file: %q: %w", name, errNotFound) } parent, _ := path.Split(strings.TrimSuffix(name, "/")) - if err := moveRec(parent, in, out, picked); err != nil { + if err := moveRec(parent, in, out); err != nil { return err } if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { - if err := moveRec(e.header.Linkname, in, out, picked); err != nil { + if err := moveRec(e.header.Linkname, in, out); err != nil { return err } } - if _, done := picked[name]; done { - return nil - } if e, ok := in.get(name); ok { out.add(e) - picked[name] = struct{}{} + in.remove(name) } return nil } @@ -584,18 +533,8 @@ func (f *tarFile) get(name string) (e *entry, ok bool) { return } -func (f *tarFile) dump(skip map[string]struct{}) []*entry { - if len(skip) == 0 { - return f.stream - } - var out []*entry - for _, e := range f.stream { - if _, ok := skip[cleanEntryName(e.header.Name)]; ok { - continue - } - out = append(out, e) - } - return out +func (f *tarFile) dump() []*entry { + return f.stream } type readCloser struct { @@ -710,7 +649,7 @@ func (cr *countReadSeeker) currentPos() int64 { return *cr.cPos } -func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) { +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { if org.Size() < 4 { return org, nil } @@ -721,13 +660,7 @@ func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHe var dR io.Reader if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { // gzip - var dgR io.ReadCloser - var err error - if gzipHelperFunc != nil { - dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size())) - } else { - dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) - } + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index ff91a37ad..f4d554655 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -307,15 +307,6 @@ func (r *Reader) initFields() error { } } - if len(r.m) == 0 { - r.m[""] = &TOCEntry{ - Name: "", - Type: "dir", - Mode: 0755, - NumLink: 1, - } - } - return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index ff165e090..a8dcdb868 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ 
b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -38,6 +38,7 @@ import ( "reflect" "sort" "strings" + "testing" "time" "github.com/containerd/stargz-snapshotter/estargz/errorutil" @@ -48,48 +49,16 @@ import ( // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression - TestStreams(t TestingT, b []byte, streams []int64) - DiffIDOf(TestingT, []byte) string + TestStreams(t *testing.T, b []byte, streams []int64) + DiffIDOf(*testing.T, []byte) string String() string } -// TestingT is the minimal set of testing.T required to run the -// tests defined in CompressionTestSuite. This interface exists to prevent -// leaking the testing package from being exposed outside tests. -type TestingT interface { - Errorf(format string, args ...any) - FailNow() - Failed() bool - Fatal(args ...any) - Fatalf(format string, args ...any) - Logf(format string, args ...any) - Parallel() -} - -// Runner allows running subtests of TestingT. This exists instead of adding -// a Run method to TestingT interface because the Run implementation of -// testing.T would not satisfy the interface. -type Runner func(t TestingT, name string, fn func(t TestingT)) - -type TestRunner struct { - TestingT - Runner Runner -} - -func (r *TestRunner) Run(name string, run func(*TestRunner)) { - r.Runner(r.TestingT, name, func(t TestingT) { - run(&TestRunner{TestingT: t, Runner: r.Runner}) - }) -} - // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. -func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) { - t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) }) - t.Run("testDigestAndVerify", func(t *TestRunner) { - t.Parallel() - testDigestAndVerify(t, controllers...) - }) - t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) }) +func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { + t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) + t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } type TestingControllerFactory func() TestingController @@ -110,7 +79,7 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. 
-func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { +func testBuild(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { name string chunkSize int @@ -196,7 +165,7 @@ func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { prefix := prefix for _, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) // Test divideEntries() entries, err := sortEntries(tarBlob, nil, nil) // identical order @@ -296,7 +265,7 @@ func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { } } -func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") @@ -356,7 +325,7 @@ func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingControl return true } -func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) @@ -370,7 +339,7 @@ func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingContr return aJTOC.Version == bJTOC.Version } -func isSameEntries(t TestingT, a, b *Reader) bool { +func isSameEntries(t *testing.T, a, b *Reader) bool { aroot, ok := a.Lookup("") if !ok { t.Fatalf("failed to get root of A") @@ -384,7 +353,7 @@ func isSameEntries(t TestingT, a, b *Reader) bool { return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) } -func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader { +func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { buf := new(bytes.Buffer) var w io.WriteCloser var err error @@ -418,7 +387,7 @@ type stargzEntry struct { // contains checks if all child entries in "b" are also contained in "a". // This function also checks if the files/chunks contain the same contents among "a" and "b". 
-func contains(t TestingT, a, b stargzEntry) bool { +func contains(t *testing.T, a, b stargzEntry) bool { ae, ar := a.e, a.r be, br := b.e, b.r t.Logf("Comparing: %q vs %q", ae.Name, be.Name) @@ -529,7 +498,7 @@ func equalEntry(a, b *TOCEntry) bool { a.Digest == b.Digest } -func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) if !ok { return nil, 0, false @@ -548,7 +517,7 @@ func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([ return data[:n], offset + ce.ChunkSize, true } -func dumpTOCJSON(t TestingT, tocJSON *JTOC) string { +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { jtocData, err := json.Marshal(*tocJSON) if err != nil { t.Fatalf("failed to marshal TOC JSON: %v", err) @@ -562,19 +531,20 @@ func dumpTOCJSON(t TestingT, tocJSON *JTOC) string { const chunkSize = 3 -type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) { +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { name string - tarInit func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) checks []check minChunkSize []int }{ { name: "no-regfile", - tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( dir("test/"), ) @@ -589,7 +559,7 @@ func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) }, { name: "small-files", - tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -613,7 +583,7 @@ func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) }, { name: "big-files", - tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -637,7 +607,7 @@ func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) { name: "with-non-regfiles", minChunkSize: []int{0, 64000}, - tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -684,7 +654,7 @@ func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) srcTarFormat := srcTarFormat for 
_, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { // Get original tar file and chunk digests dgstMap := make(map[string]digest.Digest) tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) @@ -720,7 +690,7 @@ func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -831,7 +801,7 @@ func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgst // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -912,9 +882,9 @@ func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgst // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error. 
func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ - "lost digest in a entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { + "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { var found bool for _, e := range toc.Entries { if cleanEntryName(e.Name) == filename { @@ -932,7 +902,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { t.Fatalf("rewrite target not found") } }, - "duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { + "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { var ( sampleEntry *TOCEntry targetEntry *TOCEntry @@ -959,7 +929,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } for name, rFunc := range funcs { - t.Run(name, func(t *TestRunner) { + t.Run(name, func(t *testing.T) { newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller) buf := new(bytes.Buffer) if _, err := io.Copy(buf, newSgz); err != nil { @@ -988,7 +958,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { cl := newController() rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { @@ -1020,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
func checkVerifyBrokenContentFail(filename string) check { - return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1077,9 +1047,9 @@ func chunkID(name string, offset, size int64) string { return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size) } -type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader) +type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader) -func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { +func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { decodedJTOC, jtocOffset, err := parseStargz(sgz, controller) if err != nil { t.Fatalf("failed to extract TOC JSON: %v", err) @@ -1150,7 +1120,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" @@ -1494,7 +1464,7 @@ func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) @@ -1560,9 +1530,6 @@ func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { if err != nil { t.Fatalf("stargz.Open: %v", err) } - if _, ok := r.Lookup(""); !ok { - t.Fatalf("failed to lookup rootdir: %v", err) - } wantTOCVersion := 1 if tt.wantTOCVersion > 0 { wantTOCVersion = tt.wantTOCVersion @@ -1661,7 +1628,7 @@ func digestFor(content string) string { type numTOCEntries int -func (n numTOCEntries) check(t TestingT, r *Reader) { +func (n numTOCEntries) check(t *testing.T, r *Reader) { if r.toc == nil { t.Fatal("nil TOC") } @@ -1681,15 +1648,15 @@ func (n numTOCEntries) check(t TestingT, r *Reader) { func checks(s ...stargzCheck) []stargzCheck { return s } type stargzCheck interface { - check(t TestingT, r *Reader) + check(t *testing.T, r *Reader) } -type stargzCheckFn func(TestingT, *Reader) +type stargzCheckFn func(*testing.T, *Reader) -func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) } +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } func maxDepth(max int) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { e, ok := r.Lookup("") if !ok { t.Fatal("root directory not found") @@ -1706,7 +1673,7 @@ 
func maxDepth(max int) stargzCheck { }) } -func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) { +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { if current > limit { return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", current, limit) @@ -1728,7 +1695,7 @@ func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr err } func hasFileLen(file string, wantLen int) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1744,7 +1711,7 @@ func hasFileLen(file string, wantLen int) stargzCheck { } func hasFileXattrs(file, name, value string) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1771,7 +1738,7 @@ func hasFileXattrs(file, name, value string) stargzCheck { } func hasFileDigest(file string, digest string) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("didn't find TOCEntry for file %q", file) @@ -1783,7 +1750,7 @@ func hasFileDigest(file string, digest string) stargzCheck { } func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { extraMap := make(map[string]chunkInfo) for _, e := range extra { extraMap[e.name] = e @@ -1830,7 +1797,7 @@ func hasFileContentsWithPreRead(file string, offset int, want string, extra ...c } func hasFileContentsRange(file string, offset int, want string) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { f, err := r.OpenFile(file) if err != nil { t.Fatal(err) @@ -1847,7 +1814,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { } func hasChunkEntries(file string, wantChunks int) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("no file for %q", file) @@ -1891,7 +1858,7 @@ func hasChunkEntries(file string, wantChunks int) stargzCheck { } func entryHasChildren(dir string, want ...string) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { want := append([]string(nil), want...) 
var got []string ent, ok := r.Lookup(dir) @@ -1910,7 +1877,7 @@ func entryHasChildren(dir string, want ...string) stargzCheck { } func hasDir(file string) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1924,7 +1891,7 @@ func hasDir(file string) stargzCheck { } func hasDirLinkCount(file string, count int) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1942,7 +1909,7 @@ func hasDirLinkCount(file string, count int) stargzCheck { } func hasMode(file string, mode os.FileMode) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Stat().Mode() != mode { @@ -1957,7 +1924,7 @@ func hasMode(file string, mode os.FileMode) stargzCheck { } func hasSymlink(file, target string) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "symlink" { @@ -1973,7 +1940,7 @@ func hasSymlink(file, target string) stargzCheck { } func lookupMatch(name string, want *TOCEntry) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { e, ok := r.Lookup(name) if !ok { t.Fatalf("failed to Lookup entry %q", name) @@ -1986,7 +1953,7 @@ func lookupMatch(name string, want *TOCEntry) stargzCheck { } func hasEntryOwner(entry string, owner owner) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) if !ok { t.Errorf("entry %q not found", entry) @@ -2000,7 +1967,7 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { } func mustSameEntry(files ...string) stargzCheck { - return stargzCheckFn(func(t TestingT, r *Reader) { + return stargzCheckFn(func(t *testing.T, r *Reader) { var first *TOCEntry for _, f := range files { if first == nil { @@ -2072,7 +2039,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format return f(tw, prefix, format) } -func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { +func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { format := tar.FormatUnknown for _, opt := range opts { switch v := opt.(type) { @@ -2281,7 +2248,7 @@ func noPrefetchLandmark() tarEntry { }) } -func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { +func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { if digestMap == nil { t.Fatalf("digest map mustn't be nil") } @@ -2351,7 +2318,7 @@ func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) Sys() interface{} { return nil } -func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) { +func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { if len(streams) == 0 { return // nop } @@ -2389,7 +2356,7 @@ func 
CheckGzipHasStreams(t TestingT, b []byte, streams []int64) {
 	}
 }
 
-func GzipDiffIDOf(t TestingT, b []byte) string {
+func GzipDiffIDOf(t *testing.T, b []byte) string {
 	h := sha256.New()
 	zr, err := gzip.NewReader(bytes.NewReader(b))
 	if err != nil {
diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go b/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go
index 40f315cc7..ad9fe8d38 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go
@@ -26,10 +26,6 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-// ErrRefusingToClobberExistingTag is returned when NoClobber is true and the
-// tag already exists in the target registry/repo.
-var ErrRefusingToClobberExistingTag = errors.New("refusing to clobber existing tag")
-
 // Copy copies a remote image or index from src to dst.
 func Copy(src, dst string, opt ...Option) error {
 	o := makeOptions(opt...)
@@ -62,7 +58,7 @@ func Copy(src, dst string, opt ...Option) error {
 			}
 
 			if head != nil {
-				return fmt.Errorf("%w %s@%s", ErrRefusingToClobberExistingTag, tag, head.Digest)
+				return fmt.Errorf("refusing to clobber existing tag %s@%s", tag, head.Digest)
 			}
 		}
 	}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go
index aed3d4093..627bfbfdb 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go
@@ -108,7 +108,7 @@ func newTopV1Layer(layer v1.Layer, parent *v1Layer, history v1.History, imgConfi
 	result.config.ID = id
 	result.config.Architecture = imgConfig.Architecture
 	result.config.Container = imgConfig.Container
-	result.config.DockerVersion = imgConfig.DockerVersion //nolint:staticcheck // Field will be removed in next release
+	result.config.DockerVersion = imgConfig.DockerVersion
 	result.config.OS = imgConfig.OS
 	result.config.Config = imgConfig.Config
 	result.config.Created = imgConfig.Created
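The crane/copy.go hunk above drops the exported ErrRefusingToClobberExistingTag sentinel and goes back to a plain formatted error string. A minimal sketch of what that costs callers follows; the copyTag helper, the errRefusingToClobber variable, and the registry.example reference are illustrative stand-ins, not crane's API.

```go
package main

import (
	"errors"
	"fmt"
)

// errRefusingToClobber is a local stand-in for the sentinel that the newer
// vendored crane exported; the revert goes back to a plain string error.
var errRefusingToClobber = errors.New("refusing to clobber existing tag")

// copyTag is a hypothetical helper: it fails when the destination tag exists
// and clobbering is disabled, wrapping the sentinel with %w so callers can
// still match it after the message gains context.
func copyTag(dst string, dstExists, noClobber bool) error {
	if dstExists && noClobber {
		return fmt.Errorf("%w %s", errRefusingToClobber, dst)
	}
	return nil
}

func main() {
	err := copyTag("registry.example/repo:v1", true, true)

	// With a wrapped sentinel, callers can branch on the condition.
	if errors.Is(err, errRefusingToClobber) {
		fmt.Println("skipping, tag already exists:", err)
	}

	// With the reverted plain fmt.Errorf("refusing to clobber ...") message,
	// the only option left to callers is matching on the error string.
}
```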
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
index b62d84826..960c93b5f 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
@@ -27,11 +27,10 @@ import (
 // docker_version and os.version are not part of the spec but included
 // for backwards compatibility.
 type ConfigFile struct {
-	Architecture  string    `json:"architecture"`
-	Author        string    `json:"author,omitempty"`
-	Container     string    `json:"container,omitempty"`
-	Created       Time      `json:"created,omitempty"`
-	// Deprecated: This field is deprecated and will be removed in the next release.
+	Architecture  string    `json:"architecture"`
+	Author        string    `json:"author,omitempty"`
+	Container     string    `json:"container,omitempty"`
+	Created       Time      `json:"created,omitempty"`
 	DockerVersion string    `json:"docker_version,omitempty"`
 	History       []History `json:"history,omitempty"`
 	OS            string    `json:"os"`
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go
index 409877bce..c04479600 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go
@@ -514,7 +514,7 @@ func Canonical(img v1.Image) (v1.Image, error) {
 	cfg.Container = ""
 	cfg.Config.Hostname = ""
-	cfg.DockerVersion = "" //nolint:staticcheck // Field will be removed in next release
+	cfg.DockerVersion = ""
 
 	return ConfigFile(img, cfg)
 }
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
index 15b7da1e4..99a2bb2eb 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
@@ -162,14 +162,9 @@ func makeOptions(opts ...Option) (*options, error) {
 		o.transport = transport.NewLogger(o.transport)
 	}
 
-	// Using customized retry predicate if provided, and fallback to default if not.
-	predicate := o.retryPredicate
-	if predicate == nil {
-		predicate = defaultRetryPredicate
-	}
-
 	// Wrap the transport in something that can retry network flakes.
-	o.transport = transport.NewRetry(o.transport, transport.WithRetryBackoff(o.retryBackoff), transport.WithRetryPredicate(predicate), transport.WithRetryStatusCodes(o.retryStatusCodes...))
+	o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(o.retryStatusCodes...))
+
 	// Wrap this last to prevent transport.New from double-wrapping.
 	if o.userAgent != "" {
 		o.transport = transport.NewUserAgent(o.transport, o.userAgent)
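The remote/options.go hunk above restores the older behaviour where the retry transport is always built with defaultRetryPredicate rather than an optional user-supplied predicate. As a rough, library-agnostic sketch of the pattern (retryTransport and shouldTry below are illustrative; this is not go-containerregistry's transport.NewRetry implementation):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// retryTransport is an illustrative http.RoundTripper decorator: it retries a
// request when the supplied predicate says the failure looks transient.
type retryTransport struct {
	inner     http.RoundTripper
	attempts  int
	backoff   time.Duration
	shouldTry func(error) bool // the "retry predicate"
}

func (rt *retryTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	var resp *http.Response
	var err error
	for i := 0; i < rt.attempts; i++ {
		resp, err = rt.inner.RoundTrip(req)
		if err == nil || !rt.shouldTry(err) {
			return resp, err
		}
		time.Sleep(rt.backoff)
	}
	return resp, err
}

func main() {
	// Before the revert the predicate was configurable per call; after it, the
	// default predicate is always used, roughly like hard-coding it here.
	client := &http.Client{Transport: &retryTransport{
		inner:     http.DefaultTransport,
		attempts:  3,
		backoff:   500 * time.Millisecond,
		shouldTry: func(err error) bool { return err != nil }, // stand-in default predicate
	}}
	_, err := client.Get("https://registry.example/v2/")
	fmt.Println("request finished, err:", err)
}
```

Retrying only on a non-nil error keeps the sketch safe for GET requests without bodies; real retry transports also have to consider response status codes and request-body replay, which the option names in the hunk (WithRetryStatusCodes, WithRetryBackoff) hint at.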
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
index d38e67624..482a4adee 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
@@ -25,7 +25,7 @@ import (
 )
 
 // Error implements error to support the following error specification:
-// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors
+// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
 type Error struct {
 	Errors []Diagnostic `json:"errors,omitempty"`
 	// The http status code returned.
@@ -111,7 +111,7 @@ func (d Diagnostic) String() string {
 type ErrorCode string
 
 // The set of error conditions a registry may return:
-// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors-2
+// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2
 const (
 	BlobUnknownErrorCode         ErrorCode = "BLOB_UNKNOWN"
 	BlobUploadInvalidErrorCode   ErrorCode = "BLOB_UPLOAD_INVALID"
@@ -170,7 +170,7 @@ func CheckError(resp *http.Response, codes ...int) error {
 }
 
 func makeError(resp *http.Response, body []byte) *Error {
-	// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors
+	// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
 	structuredError := &Error{}
 
 	// This can fail if e.g. the response body is not valid JSON. That's fine,
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
index e687a08c9..dee9e47e4 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/common.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
@@ -34,7 +34,6 @@ var (
 	errMissData      = errors.New("archive/tar: sparse file references non-existent data")
 	errUnrefData     = errors.New("archive/tar: sparse file contains unreferenced data")
 	errWriteHole     = errors.New("archive/tar: write non-NUL byte in sparse hole")
-	errSparseTooLong = errors.New("archive/tar: sparse map too long")
 )
 
 type headerError []string
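The tar-split hunk above removes the errSparseTooLong sentinel, and the reader.go hunks just below remove the totalSize check that enforced it, so the GNU sparse-map reader no longer bounds how much data it buffers before trusting a count taken from the archive. The general pattern being reverted can be sketched as follows; readLimitedLines, maxMapSize, and errTooLong are illustrative, not the archive/tar implementation.

```go
package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

const maxMapSize = 1 << 10 // illustrative cap on how many bytes we will buffer

var errTooLong = errors.New("map too long")

// readLimitedLines reads up to n newline-terminated tokens, but refuses to
// buffer more than maxMapSize bytes while doing so, so a count taken from the
// input itself cannot force unbounded allocation.
func readLimitedLines(r *bufio.Reader, n int) ([]string, error) {
	var out []string
	var total int
	for len(out) < n {
		line, err := r.ReadString('\n')
		total += len(line)
		if total > maxMapSize {
			return nil, errTooLong
		}
		if err != nil {
			return nil, err
		}
		out = append(out, strings.TrimSuffix(line, "\n"))
	}
	return out, nil
}

func main() {
	input := "0\n512\n1024\n512\n"
	tokens, err := readLimitedLines(bufio.NewReader(strings.NewReader(input)), 4)
	fmt.Println(tokens, err) // [0 512 1024 512] <nil>
}
```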
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
index a645c4160..248a7ccb1 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -581,17 +581,12 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
 		cntNewline int64
 		buf        bytes.Buffer
 		blk        block
-		totalSize  int
 	)
 
 	// feedTokens copies data in blocks from r into buf until there are
 	// at least cnt newlines in buf. It will not read more blocks than needed.
 	feedTokens := func(n int64) error {
 		for cntNewline < n {
-			totalSize += len(blk)
-			if totalSize > maxSpecialFileSize {
-				return errSparseTooLong
-			}
 			if _, err := mustReadFull(r, blk[:]); err != nil {
 				return err
 			}
@@ -624,8 +619,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
 	}
 
 	// Parse for all member entries.
-	// numEntries is trusted after this since feedTokens limits the number of
-	// tokens based on maxSpecialFileSize.
+	// numEntries is trusted after this since a potential attacker must have
+	// committed resources proportional to what this library used.
 	if err := feedTokens(2 * numEntries); err != nil {
 		return nil, err
 	}
diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go
index e783a9437..e99c92f39 100644
--- a/vendor/golang.org/x/oauth2/deviceauth.go
+++ b/vendor/golang.org/x/oauth2/deviceauth.go
@@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"mime"
 	"net/http"
 	"net/url"
 	"strings"
@@ -117,38 +116,10 @@ func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAu
 		return nil, fmt.Errorf("oauth2: cannot auth device: %v", err)
 	}
 	if code := r.StatusCode; code < 200 || code > 299 {
-		retrieveError := &RetrieveError{
+		return nil, &RetrieveError{
 			Response: r,
 			Body:     body,
 		}
-
-		content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
-		switch content {
-		case "application/x-www-form-urlencoded", "text/plain":
-			// some endpoints return a query string
-			vals, err := url.ParseQuery(string(body))
-			if err != nil {
-				return nil, retrieveError
-			}
-			retrieveError.ErrorCode = vals.Get("error")
-			retrieveError.ErrorDescription = vals.Get("error_description")
-			retrieveError.ErrorURI = vals.Get("error_uri")
-		default:
-			var tj struct {
-				// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
-				ErrorCode        string `json:"error"`
-				ErrorDescription string `json:"error_description"`
-				ErrorURI         string `json:"error_uri"`
-			}
-			if json.Unmarshal(body, &tj) != nil {
-				return nil, retrieveError
-			}
-			retrieveError.ErrorCode = tj.ErrorCode
-			retrieveError.ErrorDescription = tj.ErrorDescription
-			retrieveError.ErrorURI = tj.ErrorURI
-		}
-
-		return nil, retrieveError
 	}
 
 	da := &DeviceAuthResponse{}
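The deviceauth.go hunk above reverts to returning a bare *RetrieveError, dropping the parsing of the RFC 6749 section 5.2 error fields from the response body. The removed logic boils down to the following standalone sketch; parseOAuthError and oauthError are illustrative helpers, not the x/oauth2 API.

```go
package main

import (
	"encoding/json"
	"fmt"
	"mime"
	"net/url"
)

// oauthError holds the three standard fields an authorization server may
// return in an error response (RFC 6749 section 5.2).
type oauthError struct {
	Code        string `json:"error"`
	Description string `json:"error_description"`
	URI         string `json:"error_uri"`
}

// parseOAuthError decodes an error body that is either form/query encoded or
// JSON, mirroring the content-type switch that the revert removes.
func parseOAuthError(contentType string, body []byte) (oauthError, error) {
	mt, _, _ := mime.ParseMediaType(contentType)
	switch mt {
	case "application/x-www-form-urlencoded", "text/plain":
		// Some endpoints return a query string instead of JSON.
		vals, err := url.ParseQuery(string(body))
		if err != nil {
			return oauthError{}, err
		}
		return oauthError{
			Code:        vals.Get("error"),
			Description: vals.Get("error_description"),
			URI:         vals.Get("error_uri"),
		}, nil
	default:
		var e oauthError
		if err := json.Unmarshal(body, &e); err != nil {
			return oauthError{}, err
		}
		return e, nil
	}
}

func main() {
	e, _ := parseOAuthError("application/json", []byte(`{"error":"authorization_pending"}`))
	fmt.Println(e.Code) // authorization_pending
}
```

After the revert, callers of the vendored device-auth flow only see the raw status code and body and would have to do this kind of decoding themselves if they need the structured error code.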
func (t *Token) Extra(key string) any {
 	if raw, ok := t.raw.(map[string]any); ok {
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
index 9922ec331..8bbebbac9 100644
--- a/vendor/golang.org/x/oauth2/transport.go
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -58,7 +58,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
 var cancelOnce sync.Once
 
 // CancelRequest does nothing. It used to be a legacy cancellation mechanism
-// but now only logs on first use to warn that it's deprecated.
+// but now only it only logs on first use to warn that it's deprecated.
 //
 // Deprecated: use contexts for cancellation instead.
 func (t *Transport) CancelRequest(req *http.Request) {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1ab2a2b08..1afcfc5c4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -171,8 +171,8 @@ github.com/containerd/log
 # github.com/containerd/platforms v0.2.1
 ## explicit; go 1.20
 github.com/containerd/platforms
-# github.com/containerd/stargz-snapshotter/estargz v0.18.1
-## explicit; go 1.24.0
+# github.com/containerd/stargz-snapshotter/estargz v0.17.0
+## explicit; go 1.23.0
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
 # github.com/containerd/ttrpc v1.2.7
@@ -213,7 +213,7 @@ github.com/davecgh/go-spew/spew
 # github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
 github.com/distribution/reference
-# github.com/docker/cli v29.0.3+incompatible
+# github.com/docker/cli v29.0.0+incompatible
 ## explicit
 github.com/docker/cli/cli/config
 github.com/docker/cli/cli/config/configfile
@@ -225,7 +225,7 @@ github.com/docker/cli/cli/config/types
 github.com/docker/distribution/registry/api/errcode
 github.com/docker/distribution/registry/api/v2
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v28.5.2+incompatible
+# github.com/docker/docker v28.5.1+incompatible
 ## explicit
 github.com/docker/docker/api/types/versions
 # github.com/docker/docker-credential-helpers v0.9.4
@@ -410,8 +410,8 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.20.7
-## explicit; go 1.24.0
+# github.com/google/go-containerregistry v0.20.6
+## explicit; go 1.24
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/compression
 github.com/google/go-containerregistry/internal/estargz
@@ -791,7 +791,7 @@ github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
 github.com/ulikunitz/xz/lzma
-# github.com/vbatts/tar-split v0.12.2
+# github.com/vbatts/tar-split v0.12.1
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
@@ -1018,7 +1018,7 @@ golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.33.0
+# golang.org/x/oauth2 v0.32.0
 ## explicit; go 1.24.0
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
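The vendor/modules.txt hunks above pin the rolled-back module versions (estargz v0.17.0, go-containerregistry v0.20.6, tar-split v0.12.1, oauth2 v0.32.0, plus the docker/cli and docker/docker downgrades). One quick way to confirm which versions actually end up compiled into a binary built from this tree is the standard library's runtime/debug.ReadBuildInfo; the filter map below simply lists the modules touched by this revert and can be adjusted.

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info embedded (binary not built with module support)")
		return
	}

	// Modules whose pins are changed by the revert above; adjust as needed.
	interesting := map[string]bool{
		"github.com/containerd/stargz-snapshotter/estargz": true,
		"github.com/google/go-containerregistry":           true,
		"github.com/vbatts/tar-split":                      true,
		"golang.org/x/oauth2":                              true,
		"github.com/docker/cli":                            true,
		"github.com/docker/docker":                         true,
	}

	for _, dep := range info.Deps {
		if interesting[dep.Path] {
			fmt.Printf("%s %s\n", dep.Path, dep.Version)
		}
	}
}
```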