From f2fd34e7dc499871b8624e2847a13baf6d0507ec Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 26 Nov 2025 18:30:50 +0300 Subject: [PATCH 01/71] feat(ci): configure nested e2e cluster for nightly tests Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 99 +++++++- .gitignore | 6 + test/dvp-over-dvp/Taskfile.yaml | 228 ++++++++++++++++++ .../charts/cluster-config/.helmignore | 23 ++ .../charts/cluster-config/Chart.yaml | 24 ++ .../templates/cluster-config.yaml | 47 ++++ .../templates/disabled-modules.yaml | 10 + .../cluster-config/templates/ingress.yaml | 17 ++ .../charts/cluster-config/templates/mc.yaml | 88 +++++++ .../charts/cluster-config/templates/nfs.yaml | 33 +++ .../charts/cluster-config/templates/ngc.yaml | 37 +++ .../cluster-config/templates/nodegroups.yaml | 40 +++ .../charts/cluster-config/templates/rbac.yaml | 20 ++ .../templates/virtualization.yaml | 25 ++ test/dvp-over-dvp/charts/infra/.helmignore | 23 ++ test/dvp-over-dvp/charts/infra/Chart.yaml | 24 ++ .../charts/infra/templates/ingress.yaml | 74 ++++++ .../infra/templates/jump-host/deploy.yaml | 38 +++ .../charts/infra/templates/jump-host/svc.yaml | 14 ++ .../infra/templates/nfs-server/deploy.yaml | 42 ++++ .../infra/templates/nfs-server/pvc.yaml | 13 + .../infra/templates/nfs-server/svc.yaml | 19 ++ .../charts/infra/templates/ns.yaml | 4 + .../charts/infra/templates/rbac/rbac.yaml | 28 +++ .../charts/infra/templates/vi.yaml | 12 + .../charts/infra/templates/vmc.yaml | 7 + test/dvp-over-dvp/values.example.yaml | 33 +++ 27 files changed, 1020 insertions(+), 8 deletions(-) create mode 100644 test/dvp-over-dvp/Taskfile.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/.helmignore create mode 100644 test/dvp-over-dvp/charts/cluster-config/Chart.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml create mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml create mode 100644 test/dvp-over-dvp/charts/infra/.helmignore create mode 100644 test/dvp-over-dvp/charts/infra/Chart.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/ingress.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/ns.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/vi.yaml create mode 100644 test/dvp-over-dvp/charts/infra/templates/vmc.yaml create mode 100644 test/dvp-over-dvp/values.example.yaml diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 
d6ae02c147..cda9fbd7de 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -14,21 +14,104 @@ name: E2E Matrix Tests (bootstrap) +env: + BRANCH: main + VIRTUALIZATION_TAG: main + DECKHOUSE_TAG: main on: + workflow_dispatch: pull_request: types: [opened, reopened, synchronize, labeled, unlabeled] branches: - main - - feat/ci-e2e-matrix - workflow_dispatch: + - feat/ci/nightly-e2e-test-nested-env -permissions: - contents: read +defaults: + run: + shell: bash jobs: - noop: - name: Bootstrap + bootstrap: + name: Bootstrap cluster runs-on: ubuntu-latest steps: - - name: Say hello - run: echo "Bootstrap workflow OK" + - uses: actions/checkout@v4 + # with: + # ref: ${{ env.BRANCH }} + + - name: Set outputs + id: vars + run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + - name: Install htpasswd utility + run: | + sudo apt-get update + sudo apt-get install -y apache2-utils + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Generate values.yaml + run: | + cat < test/dvp-over-dvp/values.yaml + namespace: nightly-e2e-${{ steps.vars.outputs.sha_short }} + clusterConfigurationPrefix: tst-dhctl + sa: dkp-sa + deckhouse: + tag: ${{ env.DECKHOUSE_TAG }} + kubernetesVersion: Automatic + registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + virtualization: + tag: ${{ env.VIRTUALIZATION_TAG }} + image: + url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img + defaultUser: ubuntu + bootloader: BIOS + ingressHosts: + - api + - grafana + - dex + - prometheus + - console + - virtualization + instances: + masterNodes: + count: 1 + cores: 4 + coreFraction: 50% + memory: 14Gi + additionalNodes: + - name: worker + count: 3 + cores: 4 + coreFraction: 25% + memory: 8Gi + nodeType: CloudEphemeral + EOF + + - uses: azure/k8s-set-context@v4 + with: + method: service-account + k8s-url: https://api.e2e.virtlab.flant.com + k8s-secret: ${{ secrets.E2E_VIRTUALIZATION_SA_SECRET }} + + - name: Bootstrap cluster + run: | + cd test/dvp-over-dvp + task install + + - name: Show nodes (test) + run: | + cd test/dvp-over-dvp + task kubectl -- get pods + - name: Show nodes (test) + if: ${{ always() }} + run: | + cd test/dvp-over-dvp + task infra-undeploy diff --git a/.gitignore b/.gitignore index ae343f44fd..63df742d42 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,9 @@ local.Dockerfile # direnv .envrc +# dotenv file +.env + # logs log/ logs/ @@ -60,3 +63,6 @@ retry/ # nodejs node_modules/ package-lock.json + +# values +values.yaml diff --git a/test/dvp-over-dvp/Taskfile.yaml b/test/dvp-over-dvp/Taskfile.yaml new file mode 100644 index 0000000000..d9be74e75b --- /dev/null +++ b/test/dvp-over-dvp/Taskfile.yaml @@ -0,0 +1,228 @@ +# https://taskfile.dev + +version: "3" + +vars: + NAMESPACE: + sh: yq eval '.namespace' values.yaml + DEFAULT_USER: + sh: yq eval '.image.defaultUser' values.yaml + TMP_DIR: ./tmp + SSH_DIR: "{{ .TMP_DIR }}/ssh" + SSH_FILE_NAME: cloud + SSH_PUB_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.pub" + SSH_PRIV_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}" + GENERATED_VALUES_FILE: tmp/generated-values.yaml + PASSWORD_FILE: "{{ .TMP_DIR }}/password.txt" + PASSWORD_HASH_FILE: "{{ .TMP_DIR }}/password-hash.txt" +tasks: + default: + silent: true + desc: Preflight / Check if all dependencies are installed + cmds: + - | + deps=("kubectl" "jq" "yq" "docker" "helm" 
"htpasswd") + for dep in "${deps[@]}"; do + if ! command -v "$dep" >/dev/null 2>&1; then + echo "Required utility '$dep' not found!" + exit 1 + fi + done + echo "All dependencies are installed!" + + password-gen: + desc: Preflight / Generate password for admin@deckhouse.io user + cmds: + - mkdir -p {{ .TMP_DIR }} + - date +%s | sha256sum | base64 | head -c 10 > {{ .PASSWORD_FILE }} + - | + echo $(cat {{ .TMP_DIR }}/password.txt) | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 > {{ .PASSWORD_HASH_FILE }} + status: + - test -f "{{ .PASSWORD_FILE }}" + - test -f "{{ .PASSWORD_HASH_FILE }}" + + ssh-gen: + desc: Preflight / Generate ssh keypair for jump-host + cmds: + - mkdir -p "{{ .SSH_DIR }}" + - yes | ssh-keygen -t ed25519 -b 1024 -f {{ .SSH_PRIV_KEY_FILE }} -N "" -C "cloud" -v + - chmod 0600 "{{ .SSH_PUB_KEY_FILE }}" + - chmod 0400 "{{ .SSH_PRIV_KEY_FILE }}" + status: + - test -f "{{ .SSH_PRIV_KEY_FILE }}" + + render-infra: + desc: Preparation / Generate infra manifests + deps: + - ssh-gen + cmds: + - touch {{ .GENERATED_VALUES_FILE }} + - | + export NEW_KUBECONFIG_B64="$(cat {{ .SSH_PUB_KEY_FILE }})" + yq eval --inplace '.sshPublicKey = env(NEW_KUBECONFIG_B64)' {{ .GENERATED_VALUES_FILE }} + - | + export DOMAIN=$(kubectl get mc global -o json | jq '.spec.settings.modules.publicDomainTemplate | split(".")[1:] | join(".")') + yq eval --inplace '.domain = env(DOMAIN)' {{ .GENERATED_VALUES_FILE }} + - helm template dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/infra.yaml + + infra-deploy: + deps: + - render-infra + desc: Deploy infra (Namespace/RBAC/Jumphost) + vars: + start_time: + sh: date +%s + cmds: + - kubectl apply -f {{ .TMP_DIR }}/infra.yaml + - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=jump-host --timeout=300s + - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=nfs-server --timeout=300s + - | + export end_time=$(date +%s) + difference=$((end_time - {{.start_time}})) + date -ud "@$difference" +'%H:%M:%S' + + infra-undeploy: + desc: Destroy infra (Namespace/RBAC/Jumphost/...) + prompt: This command will destroy current infra... Do you want to continue? 
+ cmds: + - kubectl delete -f {{ .TMP_DIR }}/infra.yaml || true + - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true + + render-kubeconfig: + desc: Preparation / Generate kubeconfig (infra required) + vars: + SERVER: + sh: echo https://$(kubectl -n d8-user-authn get ingress kubernetes-api -o json | jq .spec.rules[0].host -r) + CERT: + sh: kubectl -n d8-user-authn get secrets kubernetes-tls -o json | jq '.data."tls.crt"' -r + TOKEN: + sh: kubectl -n {{ .NAMESPACE }} get secret dkp-sa-secret -ojson | jq -r '.data.token' | base64 -d + silent: true + cmds: + - | + cat < {{ .TMP_DIR }}/kubeconfig.yaml + apiVersion: v1 + clusters: + - cluster: + server: {{ .SERVER }} + name: dvp + contexts: + - context: + cluster: dvp + namespace: {{ .NAMESPACE }} + user: {{ .NAMESPACE }}@dvp + name: {{ .NAMESPACE }}@dvp + current-context: {{ .NAMESPACE }}@dvp + kind: Config + preferences: {} + users: + - name: {{ .NAMESPACE }}@dvp + user: + token: {{ .TOKEN }} + EOF + + render-cluster-config: + desc: Preparation / Generate cluster config (infra required) + deps: + - render-kubeconfig + - password-gen + cmds: + - touch {{ .GENERATED_VALUES_FILE }} + - | + export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" + yq eval --inplace '.passwordHash = env(PASSWORD_HASH)' {{ .GENERATED_VALUES_FILE }} + - | + export NEW_KUBECONFIG_B64="$(cat {{ .TMP_DIR }}/kubeconfig.yaml | base64 -w 0)" + yq eval --inplace '.kubeconfigDataBase64 = env(NEW_KUBECONFIG_B64)' {{ .GENERATED_VALUES_FILE }} + - helm template dvp-over-dvp-cluster-config ./charts/cluster-config -f values.yaml -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml + + dhctl-bootstrap: + desc: Bootstrap DKP over DVP + deps: + - render-cluster-config + vars: + start_time: + sh: date +%s + JUMPHOST_EXT_IP: + sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + JUMPHOST_NODEPORT: + sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + cmds: + - | + docker run --pull=always -it \ + -v "{{ .TMP_DIR }}/config.yaml:/config.yaml" \ + -v "{{ .SSH_DIR }}:/tmp/.ssh/" \ + dev-registry.deckhouse.io/sys/deckhouse-oss/install:main \ + dhctl bootstrap \ + --config=/config.yaml \ + --ssh-agent-private-keys=/tmp/.ssh/{{ .SSH_FILE_NAME }} \ + --ssh-user={{ .DEFAULT_USER }} \ + --ssh-bastion-port={{ .JUMPHOST_NODEPORT }} \ + --ssh-bastion-host={{ .JUMPHOST_EXT_IP }} \ + --ssh-bastion-user=user \ + {{.CLI_ARGS}} + - | + export end_time=$(date +%s) + difference=$((end_time - {{.start_time}})) + date -ud "@$difference" +'%H:%M:%S' + + show-connection-info: + desc: Show connection info + vars: + DOMAIN: + sh: yq eval '.domain' {{ .GENERATED_VALUES_FILE }} + PASSWORD: + sh: cat {{ .PASSWORD_FILE }} + silent: true + cmds: + - echo "Connect to master task ssh-to-master" + - echo "Grafana URL https://grafana.{{ .NAMESPACE }}.{{ .DOMAIN }}" + - echo "Default user/password admin@deckhouse.io/{{ .PASSWORD}}" + + install: + cmds: + - task: infra-deploy + - task: dhctl-bootstrap + - task: show-connection-info + + ssh-to-master: + desc: ssh to master + vars: + MASTER_NAME: + sh: kubectl -n {{ .NAMESPACE }} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}" + cmds: + - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true {{ .MASTER_NAME }}.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@{{ .MASTER_NAME }} + + 
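Note: the ssh-to-master task above tunnels SSH through OpenSSH's ProxyCommand pointing at "d8 v port-forward --stdio=true", and the same tunnel can carry scp as well. A minimal sketch of copying a file onto the nested master this way, assuming the key path, namespace lookup and node-group label used by the tasks above (the local file and destination path are illustrative only):

    # Reuse the d8 v port-forward ProxyCommand from ssh-to-master to scp a file to the master VM.
    NAMESPACE=$(yq eval '.namespace' values.yaml)
    DEFAULT_USER=$(yq eval '.image.defaultUser' values.yaml)
    MASTER_NAME=$(kubectl -n "$NAMESPACE" get vm -l dvp.deckhouse.io/node-group=master \
      -o jsonpath='{.items[0].metadata.name}')
    scp -i ./tmp/ssh/cloud \
      -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
      -o "ProxyCommand=d8 v port-forward --stdio=true ${MASTER_NAME}.${NAMESPACE} 22" \
      ./some-local-file "${DEFAULT_USER}@${MASTER_NAME}:/tmp/some-local-file"
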
kill-dvp-resources: + cmds: + - kubectl -n {{ .NAMESPACE }} delete vm --all --force --grace-period=0 + - kubectl -n {{ .NAMESPACE }} delete vd --all --force --grace-period=0 + - kubectl -n {{ .NAMESPACE }} delete vmip --all --force --grace-period=0 + + clean: + cmds: + - task: infra-undeploy + - rm -rf "{{ .TMP_DIR }}" + + __ssh-command: + silent: true + internal: true + vars: + MASTER_NAME: + sh: kubectl -n {{ .NAMESPACE }} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}" + cmds: + - /usr/bin/ssh -t -i {{ .SSH_PRIV_KEY_FILE }} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true {{ .MASTER_NAME }}.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@{{ .MASTER_NAME }} {{ .CMD }} + + kubectl: + desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" + cmds: + - task: __ssh-command + vars: + CMD: sudo /opt/deckhouse/bin/kubectl {{ .CLI_ARGS }} + + k9s: + desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" + cmds: + - task: __ssh-command + vars: + CMD: TERM=xterm-256color sudo /usr/local/bin/k9s {{ .CLI_ARGS }} diff --git a/test/dvp-over-dvp/charts/cluster-config/.helmignore b/test/dvp-over-dvp/charts/cluster-config/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/test/dvp-over-dvp/charts/cluster-config/Chart.yaml b/test/dvp-over-dvp/charts/cluster-config/Chart.yaml new file mode 100644 index 0000000000..c61a43f29a --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: cluster-config +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml new file mode 100644 index 0000000000..c9dbc7cf2b --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml @@ -0,0 +1,47 @@ +apiVersion: deckhouse.io/v1 +kind: ClusterConfiguration +clusterType: Cloud +cloud: + provider: DVP + prefix: {{ .Values.clusterConfigurationPrefix | default "e2e" }} +podSubnetCIDR: 10.112.0.0/16 +serviceSubnetCIDR: 10.223.0.0/16 +kubernetesVersion: "{{ .Values.deckhouse.kubernetesVersion }}" +clusterDomain: "internal.cluster.local" +--- +apiVersion: deckhouse.io/v1 +kind: InitConfiguration +deckhouse: + imagesRepo: dev-registry.deckhouse.io/sys/deckhouse-oss + registryDockerCfg: {{ .Values.deckhouse.registryDockerCfg }} + devBranch: {{ .Values.deckhouse.tag }} +--- +apiVersion: deckhouse.io/v1 +kind: DVPClusterConfiguration +layout: Standard +sshPublicKey: {{ .Values.sshPublicKey }} +masterNodeGroup: + replicas: {{ .Values.instances.masterNodes.count }} + instanceClass: + virtualMachine: + bootloader: {{ .Values.image.bootloader }} + cpu: + cores: {{ .Values.instances.masterNodes.cores }} + coreFraction: {{ .Values.instances.masterNodes.coreFraction }} + memory: + size: {{ .Values.instances.masterNodes.memory }} + ipAddresses: + - Auto + virtualMachineClassName: "{{ .Values.namespace }}-cpu" + rootDisk: + size: 50Gi + storageClass: {{ .Values.storageClass }} + image: + kind: VirtualImage + name: image + etcdDisk: + size: 15Gi + storageClass: {{ .Values.storageClass }} +provider: + kubeconfigDataBase64: {{ .Values.kubeconfigDataBase64 }} + namespace: {{ .Values.namespace }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml new file mode 100644 index 0000000000..2887a2b168 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml @@ -0,0 +1,10 @@ +{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" -}} +{{ range $modules }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: {{ . 
}} +spec: + enabled: false +{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml new file mode 100644 index 0000000000..387a3c89bc --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: deckhouse.io/v1 +kind: IngressNginxController +metadata: + name: main +spec: + inlet: HostPort + enableIstioSidecar: false + ingressClass: nginx + hostPort: + httpPort: 80 + httpsPort: 443 + nodeSelector: + node-role.kubernetes.io/master: '' + tolerations: + - effect: NoSchedule + operator: Exists diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml new file mode 100644 index 0000000000..dacae1acd3 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml @@ -0,0 +1,88 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: deckhouse +spec: + version: 1 + enabled: true + settings: + bundle: Default + logLevel: Info +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: global +spec: + version: 1 + settings: + defaultClusterStorageClass: nfs + modules: + publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.domain }}" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authn +spec: + version: 1 + enabled: true + settings: + controlPlaneConfigurator: + dexCAMode: DoNotNeed + publishAPI: + enabled: true + https: + mode: Global + global: + kubeconfigGeneratorMasterCA: "" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authz +spec: + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: cni-cilium +spec: + version: 1 + enabled: true + settings: + tunnelMode: VXLAN +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: prompp +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: prompp +spec: + imageTag: stable + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: snapshot-controller +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: snapshot-controller +spec: + imageTag: main + scanInterval: 15s diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml new file mode 100644 index 0000000000..d2b4ff4666 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: csi-nfs +spec: + source: deckhouse + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: csi-nfs +spec: + imageTag: main + scanInterval: 10m +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: NFSStorageClass +metadata: + name: nfs +spec: + connection: + host: "nfs-server.{{ .Values.namespace }}.svc.cluster.local" + share: / + nfsVersion: "4.2" + mountOptions: + mountMode: hard + timeout: 60 + retransmissions: 3 + reclaimPolicy: Delete + volumeBindingMode: Immediate diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml new file mode 100644 index 0000000000..3672dc8e79 --- /dev/null +++ 
b/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: qemu-guest-agent-install-ubuntu.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["ubuntu-lts", "debian"] + content: | + bb-apt-install qemu-guest-agent + systemctl enable --now qemu-guest-agent +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: install-tools.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["*"] + content: | + bb-sync-file /etc/profile.d/01-kubectl-aliases.sh - << "EOF" + source <(/opt/deckhouse/bin/kubectl completion bash) + alias k=kubectl + complete -o default -F __start_kubectl k + EOF + + if [ ! -f /usr/local/bin/k9s ]; then + K9S_URL=$(curl -s https://api.github.com/repos/derailed/k9s/releases/latest | jq '.assets[] | select(.name=="k9s_Linux_amd64.tar.gz") | .browser_download_url' -r) + curl -L "${K9S_URL}" | tar -xz -C /usr/local/bin/ "k9s" + fi + + if [ ! -f /usr/local/bin/stern ]; then + STERN_URL=$(curl -s https://api.github.com/repos/stern/stern/releases/latest | jq '.assets[].browser_download_url | select(. | test("linux_amd64"))' -r) + curl -L "${STERN_URL}" | tar -xz -C /usr/local/bin/ "stern" + fi diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml new file mode 100644 index 0000000000..4025e441b7 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml @@ -0,0 +1,40 @@ +{{ range .Values.instances.additionalNodes }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: DVPInstanceClass +metadata: + name: {{ .name }} +spec: + virtualMachine: + virtualMachineClassName: "{{ $.Values.namespace }}-cpu" + cpu: + cores: {{ .cores }} + coreFraction: {{ .coreFraction }} + memory: + size: {{ .memory }} + bootloader: {{ $.Values.image.bootloader }} + rootDisk: + size: 50Gi + storageClass: {{ $.Values.storageClass }} + image: + kind: VirtualImage + name: image +--- +apiVersion: deckhouse.io/v1 +kind: NodeGroup +metadata: + name: {{ .name }} +spec: +{{- if eq .name "system" }} + nodeTemplate: + labels: + node-role.deckhouse.io/system: "" +{{- end }} + nodeType: {{ .nodeType | default "CloudEphemeral" }} + cloudInstances: + minPerZone: {{ .count }} + maxPerZone: {{ .count }} + classReference: + kind: DVPInstanceClass + name: {{ .name }} +{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml new file mode 100644 index 0000000000..6b8998a1e8 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: deckhouse.io/v1 +kind: ClusterAuthorizationRule +metadata: + name: admin +spec: + subjects: + - kind: User + name: admin@deckhouse.io + accessLevel: SuperAdmin + portForwarding: true +--- +apiVersion: deckhouse.io/v1 +kind: User +metadata: + name: admin +spec: + email: admin@deckhouse.io + # echo "t3chn0l0gi4" | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 + password: {{ .Values.passwordHash }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml new file mode 100644 index 0000000000..c5a27c5749 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: virtualization +spec: + 
enabled: true + settings: + dvcr: + storage: + persistentVolumeClaim: + size: 10Gi + # storageClassName: linstor-thin-r1 + type: PersistentVolumeClaim + virtualMachineCIDRs: + - 192.168.10.0/24 + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: virtualization +spec: + imageTag: {{ .Values.virtualization.tag }} + scanInterval: 15s diff --git a/test/dvp-over-dvp/charts/infra/.helmignore b/test/dvp-over-dvp/charts/infra/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/test/dvp-over-dvp/charts/infra/Chart.yaml b/test/dvp-over-dvp/charts/infra/Chart.yaml new file mode 100644 index 0000000000..e0ab20a245 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: infra +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/test/dvp-over-dvp/charts/infra/templates/ingress.yaml b/test/dvp-over-dvp/charts/infra/templates/ingress.yaml new file mode 100644 index 0000000000..b813234319 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/ingress.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-80 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + dvp.deckhouse.io/node-group: master +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-443 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + dvp.deckhouse.io/node-group: master +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-https + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +spec: + ingressClassName: nginx + rules: + {{- range .Values.ingressHosts }} + - host: "{{ . 
}}.{{ $.Values.namespace }}.{{ $.Values.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-443 + port: + number: 443 + {{- end }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-http + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - host: "*.{{ .Values.namespace }}.{{ .Values.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-80 + port: + number: 80 diff --git a/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml b/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml new file mode 100644 index 0000000000..e76f76dbd0 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: jump-host + template: + metadata: + labels: + app: jump-host + spec: + containers: + - name: jump-host + image: registry-dvp.dev.flant.dev/tools/jump-host:v0.1.2 + imagePullPolicy: Always + resources: + limits: + cpu: "200m" + memory: "200Mi" + requests: + cpu: "200m" + memory: "200Mi" + ports: + - containerPort: 2222 + env: + - name: SSH_KEY + value: "{{ .Values.sshPublicKey }}" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 diff --git a/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml b/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml new file mode 100644 index 0000000000..cacb3421ab --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + type: NodePort + selector: + app: jump-host + ports: + - protocol: TCP + port: 2222 + targetPort: 2222 diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml new file mode 100644 index 0000000000..e3b934ec9c --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml @@ -0,0 +1,42 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nfs-server + namespace: {{ .Values.namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-server + template: + metadata: + name: nfs-server + labels: + app: nfs-server + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: nfs-server + image: itsthenetwork/nfs-server-alpine:latest + imagePullPolicy: IfNotPresent + env: + - name: SHARED_DIRECTORY + value: "/exports" + volumeMounts: + - mountPath: /exports + name: nfs-data + ports: + - name: tcp-2049 + containerPort: 2049 + protocol: TCP + - name: udp-111 + containerPort: 111 + protocol: UDP + securityContext: + privileged: true + volumes: + - name: nfs-data + persistentVolumeClaim: + claimName: nfs-data diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml new file mode 100644 index 0000000000..e19ba3f190 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-data + namespace: {{ .Values.namespace }} +spec: + 
accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: {{ .Values.storageClass }} diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml new file mode 100644 index 0000000000..0aca8064da --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml @@ -0,0 +1,19 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: nfs-server + namespace: {{ .Values.namespace }} + labels: + app: nfs-server +spec: + type: ClusterIP + selector: + app: nfs-server + ports: + - name: tcp-2049 + port: 2049 + protocol: TCP + - name: udp-111 + port: 111 + protocol: UDP diff --git a/test/dvp-over-dvp/charts/infra/templates/ns.yaml b/test/dvp-over-dvp/charts/infra/templates/ns.yaml new file mode 100644 index 0000000000..77db5f9f65 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} diff --git a/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml b/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml new file mode 100644 index 0000000000..9dec96bfa3 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.sa}} + namespace: {{ .Values.namespace }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.sa}}-secret + namespace: {{ .Values.namespace }} + annotations: + kubernetes.io/service-account.name: {{ .Values.sa}} +type: kubernetes.io/service-account-token +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Values.sa}}-rb + namespace: {{ .Values.namespace }} +subjects: + - kind: ServiceAccount + name: {{ .Values.sa}} + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: d8:use:role:manager + apiGroup: rbac.authorization.k8s.io diff --git a/test/dvp-over-dvp/charts/infra/templates/vi.yaml b/test/dvp-over-dvp/charts/infra/templates/vi.yaml new file mode 100644 index 0000000000..66034a649d --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/vi.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualImage +metadata: + name: image + namespace: {{ .Values.namespace }} +spec: + storage: ContainerRegistry + dataSource: + type: HTTP + http: + url: {{ .Values.image.url }} diff --git a/test/dvp-over-dvp/charts/infra/templates/vmc.yaml b/test/dvp-over-dvp/charts/infra/templates/vmc.yaml new file mode 100644 index 0000000000..39330ced39 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/vmc.yaml @@ -0,0 +1,7 @@ +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachineClass +metadata: + name: "{{ .Values.namespace }}-cpu" +spec: + cpu: + type: Discovery diff --git a/test/dvp-over-dvp/values.example.yaml b/test/dvp-over-dvp/values.example.yaml new file mode 100644 index 0000000000..a25055d26d --- /dev/null +++ b/test/dvp-over-dvp/values.example.yaml @@ -0,0 +1,33 @@ +storageClass: rv-thin-r1 +namespace: kek +clusterConfigurationPrefix: demo-cluster +sa: dkp-sa +deckhouse: + tag: main + kubernetesVersion: Automatic + registryDockerCfg: # <-- Put license key here +virtualization: + tag: main +image: + url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img + defaultUser: ubuntu + bootloader: EFI +ingressHosts: + - api + - grafana + - dex + - prometheus + - console + - virtualization +instances: + 
masterNodes: + count: 1 + cores: 4 + coreFraction: 50% + memory: 14Gi + additionalNodes: + - name: worker + count: 3 + cores: 4 + coreFraction: 50% + memory: 12Gi From 47612d0bcd60d8164a6442544fa33a9004cb9e7d Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 26 Nov 2025 20:07:05 +0300 Subject: [PATCH 02/71] change token Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index cda9fbd7de..d0d3632ec3 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -99,7 +99,7 @@ jobs: with: method: service-account k8s-url: https://api.e2e.virtlab.flant.com - k8s-secret: ${{ secrets.E2E_VIRTUALIZATION_SA_SECRET }} + k8s-secret: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} - name: Bootstrap cluster run: | From d19feae96c6ddece25467b09fdde73ac58b8aa48 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 26 Nov 2025 20:30:13 +0300 Subject: [PATCH 03/71] fix ask fo del Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 13 +++++++++++-- test/dvp-over-dvp/Taskfile.yaml | 1 - 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index d0d3632ec3..cf997b56ee 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -94,12 +94,21 @@ jobs: memory: 8Gi nodeType: CloudEphemeral EOF - - - uses: azure/k8s-set-context@v4 + + - name: Test + run: | + echo "before run azure/k8s-set-context@v4" + - name: Configure kubectl via azure/k8s-set-context@v4 + uses: azure/k8s-set-context@v4 with: method: service-account k8s-url: https://api.e2e.virtlab.flant.com k8s-secret: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + + - name: Test after azure/k8s-set-context@v4 + run: | + echo "after run azure/k8s-set-context@v4" + kubectl get pods - name: Bootstrap cluster run: | diff --git a/test/dvp-over-dvp/Taskfile.yaml b/test/dvp-over-dvp/Taskfile.yaml index d9be74e75b..13668e5e77 100644 --- a/test/dvp-over-dvp/Taskfile.yaml +++ b/test/dvp-over-dvp/Taskfile.yaml @@ -83,7 +83,6 @@ tasks: infra-undeploy: desc: Destroy infra (Namespace/RBAC/Jumphost/...) - prompt: This command will destroy current infra... Do you want to continue? 
cmds: - kubectl delete -f {{ .TMP_DIR }}/infra.yaml || true - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true From 75231bcb18f033f2c8b91dbb71f98054e708f99e Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 26 Nov 2025 21:03:58 +0300 Subject: [PATCH 04/71] use kubeconf Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 256 ++++-- .github/workflows/e2e-reusable-pipeline.yml | 832 +++++++++++++++++- test/dvp-over-dvp/Taskfile.yaml | 27 +- .../templates/cluster-config.yaml | 22 + .../templates/disabled-modules.yaml | 3 +- .../charts/cluster-config/templates/mc.yaml | 12 +- .../charts/cluster-config/templates/nfs.yaml | 4 +- .../charts/cluster-config/templates/ngc.yaml | 17 + .../cluster-config/templates/nodegroups.yaml | 40 - .../templates/virtualization.yaml | 25 - .../infra/templates/nfs-server/deploy.yaml | 2 + .../infra/templates/nfs-server/pvc.yaml | 4 +- .../infra/templates/nfs-server/svc.yaml | 2 + test/dvp-over-dvp/nested-sa-config/gen-sa.sh | 184 ++++ test/dvp-over-dvp/storage/ceph/00-ms.yaml | 10 + test/dvp-over-dvp/storage/ceph/01-mc.yaml | 36 + test/dvp-over-dvp/storage/ceph/02-sa.yaml | 74 ++ test/dvp-over-dvp/storage/ceph/03-cm.yaml | 245 ++++++ .../dvp-over-dvp/storage/ceph/04-cluster.yaml | 111 +++ .../storage/ceph/05-blockpool.yaml | 13 + .../dvp-over-dvp/storage/ceph/06-toolbox.yaml | 83 ++ .../storage/ceph/ceph-configure.sh | 85 ++ .../storage/sds-replicated/lvg-gen.sh | 57 ++ .../storage/sds-replicated/mc.yaml | 32 + .../storage/sds-replicated/rsc-gen.sh | 87 ++ test/dvp-over-dvp/tools/deckhouse-queue.sh | 145 +++ test/dvp-over-dvp/values.example.yaml | 33 - test/e2e/scripts/task_run_ci.sh | 7 +- 28 files changed, 2245 insertions(+), 203 deletions(-) delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml create mode 100644 test/dvp-over-dvp/nested-sa-config/gen-sa.sh create mode 100644 test/dvp-over-dvp/storage/ceph/00-ms.yaml create mode 100644 test/dvp-over-dvp/storage/ceph/01-mc.yaml create mode 100644 test/dvp-over-dvp/storage/ceph/02-sa.yaml create mode 100644 test/dvp-over-dvp/storage/ceph/03-cm.yaml create mode 100644 test/dvp-over-dvp/storage/ceph/04-cluster.yaml create mode 100644 test/dvp-over-dvp/storage/ceph/05-blockpool.yaml create mode 100644 test/dvp-over-dvp/storage/ceph/06-toolbox.yaml create mode 100644 test/dvp-over-dvp/storage/ceph/ceph-configure.sh create mode 100755 test/dvp-over-dvp/storage/sds-replicated/lvg-gen.sh create mode 100644 test/dvp-over-dvp/storage/sds-replicated/mc.yaml create mode 100644 test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh create mode 100644 test/dvp-over-dvp/tools/deckhouse-queue.sh delete mode 100644 test/dvp-over-dvp/values.example.yaml diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index cf997b56ee..c1134dd952 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -14,10 +14,6 @@ name: E2E Matrix Tests (bootstrap) -env: - BRANCH: main - VIRTUALIZATION_TAG: main - DECKHOUSE_TAG: main on: workflow_dispatch: pull_request: @@ -26,101 +22,187 @@ on: - main - feat/ci/nightly-e2e-test-nested-env +concurrency: + group: "${{ github.workflow }}-${{ github.event.number || github.ref }}" + cancel-in-progress: true + defaults: run: shell: bash jobs: - bootstrap: - name: Bootstrap cluster + e2e-ceph: + name: E2E Pipeline (Ceph) + uses: ./.github/workflows/e2e-reusable-pipeline.yml + with: + 
storage_type: ceph + nested_storageclass_name: nested-ceph-pool-r2-csi-rbd + default_cluster_storageclass: ceph-pool-r2-csi-rbd-immediate + branch: main + virtualization_tag: main + deckhouse_tag: main + default_user: ubuntu + go_version: "1.24.6" + e2e_timeout: "3h" + secrets: + DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + + e2e-replicated: + name: E2E Pipeline (Replicated) + uses: ./.github/workflows/e2e-reusable-pipeline.yml + with: + storage_type: replicated + nested_storageclass_name: nested-thin-r1 + default_cluster_storageclass: ceph-pool-r2-csi-rbd-immediate + branch: main + virtualization_tag: main + deckhouse_tag: main + default_user: ubuntu + go_version: "1.24.6" + e2e_timeout: "3h" + secrets: + DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + + + report-to-channel: runs-on: ubuntu-latest + name: End-to-End tests report + needs: + - e2e-ceph + - e2e-replicated + if: ${{ always()}} steps: - - uses: actions/checkout@v4 - # with: - # ref: ${{ env.BRANCH }} + - name: Send results to channel + run: | + # Map storage types to CSI names + get_csi_name() { + local storage_type=$1 + case "$storage_type" in + "ceph") + echo "rbd.csi.ceph.com" + ;; + "replicated") + echo "replicated.csi.storage.deckhouse.io" + ;; + *) + echo "$storage_type" + ;; + esac + } - - name: Set outputs - id: vars - run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + # Parse summary JSON and add to table + parse_summary() { + local summary_json=$1 + local storage_type=$2 + + if [ -z "$summary_json" ] || [ "$summary_json" == "null" ] || [ "$summary_json" == "" ]; then + echo "Warning: Empty summary for $storage_type" + return + fi - - name: Install htpasswd utility - run: | - sudo apt-get update - sudo apt-get install -y apache2-utils + # Try to parse as JSON (handle both JSON string and already parsed JSON) + if ! 
echo "$summary_json" | jq empty 2>/dev/null; then + echo "Warning: Invalid JSON for $storage_type: $summary_json" + return + fi - - name: Install Task - uses: arduino/setup-task@v2 - with: - version: 3.x - repo-token: ${{ secrets.GITHUB_TOKEN }} + # Parse JSON fields + csi_raw=$(echo "$summary_json" | jq -r '.CSI // empty' 2>/dev/null) + if [ -z "$csi_raw" ] || [ "$csi_raw" == "null" ] || [ "$csi_raw" == "" ]; then + csi=$(get_csi_name "$storage_type") + else + csi="$csi_raw" + fi + + date=$(echo "$summary_json" | jq -r '.Date // ""' 2>/dev/null) + time=$(echo "$summary_json" | jq -r '.StartTime // ""' 2>/dev/null) + branch=$(echo "$summary_json" | jq -r '.Branch // ""' 2>/dev/null) + status=$(echo "$summary_json" | jq -r '.Status // ":question: UNKNOWN"' 2>/dev/null) + passed=$(echo "$summary_json" | jq -r '.Passed // 0' 2>/dev/null) + failed=$(echo "$summary_json" | jq -r '.Failed // 0' 2>/dev/null) + pending=$(echo "$summary_json" | jq -r '.Pending // 0' 2>/dev/null) + skipped=$(echo "$summary_json" | jq -r '.Skipped // 0' 2>/dev/null) + link=$(echo "$summary_json" | jq -r '.Link // ""' 2>/dev/null) - - name: Setup d8 - uses: ./.github/actions/install-d8 + # Set defaults if empty + [ -z "$passed" ] && passed=0 + [ -z "$failed" ] && failed=0 + [ -z "$pending" ] && pending=0 + [ -z "$skipped" ] && skipped=0 + [ -z "$status" ] && status=":question: UNKNOWN" - - name: Generate values.yaml - run: | - cat < test/dvp-over-dvp/values.yaml - namespace: nightly-e2e-${{ steps.vars.outputs.sha_short }} - clusterConfigurationPrefix: tst-dhctl - sa: dkp-sa - deckhouse: - tag: ${{ env.DECKHOUSE_TAG }} - kubernetesVersion: Automatic - registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} - virtualization: - tag: ${{ env.VIRTUALIZATION_TAG }} - image: - url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img - defaultUser: ubuntu - bootloader: BIOS - ingressHosts: - - api - - grafana - - dex - - prometheus - - console - - virtualization - instances: - masterNodes: - count: 1 - cores: 4 - coreFraction: 50% - memory: 14Gi - additionalNodes: - - name: worker - count: 3 - cores: 4 - coreFraction: 25% - memory: 8Gi - nodeType: CloudEphemeral + # Validate date + if [ -n "$date" ] && [ "$date" != "" ]; then + current_date=$(date +"%Y-%m-%d") + if date -d "$current_date" +%s >/dev/null 2>&1 && date -d "$date" +%s >/dev/null 2>&1; then + if [ "$(date -d "$current_date" +%s)" -gt "$(date -d "$date" +%s)" ]; then + status=":x: WRONG REPORT DATE!" 
+ fi + fi + fi + + # Format link - use CSI name as fallback if link is empty + if [ -z "$link" ] || [ "$link" == "" ]; then + link_text="$csi" + else + link_text="[:link: $csi]($link)" + fi + + # Add row to table + markdown_table+="| $link_text | $status | $passed | $failed | $pending | $skipped | $date | $time | $branch |\n" + } + + # Initialize markdown table + markdown_table="" + header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" + separator="|---|---|---|---|---|---|---|---|---|\n" + markdown_table+="$header" + markdown_table+="$separator" + + # Get current date for header + DATE=$(date +"%Y-%m-%d") + COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n\n" + + # Parse summaries from job outputs + # ceph_summary=${{ toJSON(needs.e2e-ceph.outputs.e2e-summary) }} + # replicated_summary=${{ toJSON(needs.e2e-replicated.outputs.e2e-summary) }} + # Save to json files + cat > /tmp/ceph.json << 'EOF' + ${{ needs.e2e-ceph.outputs.e2e-summary }} EOF - - - name: Test - run: | - echo "before run azure/k8s-set-context@v4" - - name: Configure kubectl via azure/k8s-set-context@v4 - uses: azure/k8s-set-context@v4 - with: - method: service-account - k8s-url: https://api.e2e.virtlab.flant.com - k8s-secret: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} - - - name: Test after azure/k8s-set-context@v4 - run: | - echo "after run azure/k8s-set-context@v4" - kubectl get pods - - name: Bootstrap cluster - run: | - cd test/dvp-over-dvp - task install + cat > /tmp/replicated.json << 'EOF' + ${{ needs.e2e-replicated.outputs.e2e-summary }} + EOF - - name: Show nodes (test) - run: | - cd test/dvp-over-dvp - task kubectl -- get pods - - name: Show nodes (test) - if: ${{ always() }} - run: | - cd test/dvp-over-dvp - task infra-undeploy + if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then + parse_summary "$(cat /tmp/ceph.json)" "ceph" + fi + + if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then + parse_summary "$(cat /tmp/replicated.json)" "replicated" + fi + + # Parse each summary + # if [ -n "$ceph_summary" ] && [ "$ceph_summary" != "null" ]; then + # parse_summary "$ceph_summary" "ceph" + # fi + + # if [ -n "$replicated_summary" ] && [ "$replicated_summary" != "null" ]; then + # parse_summary "$replicated_summary" "replicated" + # fi + + COMBINED_SUMMARY+="${markdown_table}\n" + + echo -e "$COMBINED_SUMMARY" + + # Send to channel if webhook is configured + if [ -n "$LOOP_WEBHOOK_URL" ]; then + curl --request POST --header 'Content-Type: application/json' --data "{\"text\": \"${COMBINED_SUMMARY}\"}" "$LOOP_WEBHOOK_URL" + fi + env: + LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }} \ No newline at end of file diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index d9b84d26ca..07674a1a29 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -52,8 +52,13 @@ on: go_version: required: false type: string - default: "1.24.5" + default: "1.24.6" description: "Go version" + e2e_timeout: + required: false + type: string + default: "3h" + description: "E2E tests timeout" secrets: DEV_REGISTRY_DOCKER_CFG: required: true @@ -61,8 +66,11 @@ on: required: true PROD_IO_REGISTRY_DOCKER_CFG: required: true - GITHUB_TOKEN: - required: true + outputs: + e2e-summary: + description: "E2E test results" + value: ${{ jobs.e2e-test.outputs.report-summary }} + 
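Note: the e2e-summary value declared here is consumed by the report-to-channel job in e2e-matrix.yml, whose parse_summary helper reads it as JSON with the fields CSI, Date, StartTime, Branch, Status, Passed, Failed, Pending, Skipped and Link. A minimal sketch of how a producing step could assemble a compatible payload with jq; the actual producer is the e2e-test job, which is not part of this hunk, and every value below is illustrative only:

    # Hypothetical step body; the field names mirror the jq lookups in report-to-channel.
    summary=$(jq -cn \
      --arg csi "rbd.csi.ceph.com" \
      --arg date "$(date +%Y-%m-%d)" \
      --arg start "$(date +%H:%M:%S)" \
      --arg branch "main" \
      --arg status ":white_check_mark: SUCCESS" \
      --arg link "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" \
      --argjson passed 10 --argjson failed 0 --argjson pending 0 --argjson skipped 2 \
      '{CSI: $csi, Date: $date, StartTime: $start, Branch: $branch, Status: $status,
        Passed: $passed, Failed: $failed, Pending: $pending, Skipped: $skipped, Link: $link}')
    echo "report-summary=$summary" >> "$GITHUB_OUTPUT"
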
env: BRANCH: ${{ inputs.branch }} @@ -76,9 +84,819 @@ defaults: shell: bash jobs: - noop: - name: Bootstrap + bootstrap: + name: Bootstrap cluster (${{ inputs.storage_type }}) + runs-on: ubuntu-latest + concurrency: + group: "${{ github.workflow }}-${{ github.event.number || github.ref }}-${{ inputs.storage_type }}" + cancel-in-progress: true + outputs: + kubeconfig-content: ${{ steps.generate-kubeconfig.outputs.config }} + storage-type: ${{ steps.vars.outputs.storage_type }} + nested-storageclass-name: ${{ steps.vars.outputs.nested_storageclass_name }} + steps: + - uses: actions/checkout@v4 + # with: + # ref: ${{ env.BRANCH }} + + - name: Set outputs + id: vars + run: | + namespace="nightly-e2e-${{ inputs.storage_type }}-$(git rev-parse --short HEAD)" + echo "namespace=$namespace" >> $GITHUB_OUTPUT + echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + echo "storage_type=${{ inputs.storage_type }}" >> $GITHUB_OUTPUT + echo "nested_storageclass_name=${{ inputs.nested_storageclass_name }}" >> $GITHUB_OUTPUT + + REGISTRY=$(base64 -d <<< ${{secrets.DEV_REGISTRY_DOCKER_CFG}} | jq '.auths | to_entries | .[] | .key' -r) + USERNAME=$(base64 -d <<< ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | jq '.auths | to_entries | .[] | .value.auth' -r | base64 -d | cut -d ':' -f1) + PASSWORD=$(base64 -d <<< ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | jq '.auths | to_entries | .[] | .value.auth' -r | base64 -d | cut -d ':' -f2) + + echo "registry=$REGISTRY" >> $GITHUB_OUTPUT + echo "username=$USERNAME" >> $GITHUB_OUTPUT + echo "password=$PASSWORD" >> $GITHUB_OUTPUT + + - name: Install htpasswd utility + run: | + sudo apt-get update + sudo apt-get install -y apache2-utils + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Log in to private registry + uses: docker/login-action@v3 + with: + registry: ${{ steps.vars.outputs.registry }} + username: ${{ steps.vars.outputs.username }} + password: ${{ steps.vars.outputs.password }} + + - name: Configure kubectl via azure/k8s-set-context@v4 + uses: azure/k8s-set-context@v4 + with: + method: kubeconfig + context: e2e-cluster-nightly-e2e-virt-sa + kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + + - name: Generate values.yaml + run: | + defaultStorageClass=$(kubectl get storageclass -o json \ + | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class" == "true") | .metadata.name') + + cat < test/dvp-over-dvp/values.yaml + namespace: ${{ steps.vars.outputs.namespace }} + storageClass: ${defaultStorageClass} + nfsEnabled: false + nfsSC: nested-nfs-${{ inputs.storage_type }}-${{ steps.vars.outputs.sha_short }} + defaultClusterStorageClass: ${{ inputs.default_cluster_storageclass }} + clusterConfigurationPrefix: ${{ inputs.storage_type }} + sa: dkp-sa + deckhouse: + tag: ${{ env.DECKHOUSE_TAG }} + kubernetesVersion: Automatic + registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + image: + url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img + defaultUser: ${{ env.DEFAULT_USER }} + bootloader: BIOS + ingressHosts: + - api + - grafana + - dex + - prometheus + - console + - virtualization + instances: + masterNodes: + count: 1 + cores: 8 + coreFraction: 50% + memory: 14Gi + additionalNodes: + - name: worker + count: 3 + cores: 10 + coreFraction: 25% + memory: 8Gi + nodeType: CloudEphemeral + bootloader: BIOS + EOF + + - name: Bootstrap 
cluster [infra-deploy] + working-directory: test/dvp-over-dvp + run: | + task infra-deploy + - name: Bootstrap cluster [dhctl-bootstrap] + id: dhctl-bootstrap + working-directory: test/dvp-over-dvp + run: | + task dhctl-bootstrap + timeout-minutes: 30 + - name: Bootstrap cluster [show-connection-info] + working-directory: test/dvp-over-dvp + run: | + task show-connection-info + + - name: Save ssh to secrets in cluster + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + if: always() + run: | + kubectl -n $NAMESPACE create secret generic ssh-key --from-file=test/dvp-over-dvp/tmp/ssh/cloud + + - name: Get info about nested master VM + working-directory: test/dvp-over-dvp + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + run: | + nested_master=$(kubectl -n ${NAMESPACE} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + + echo "Pods" + kubectl get pods -n "${NAMESPACE}" + echo "" + + echo "VMs" + kubectl get vm -n "${NAMESPACE}" + echo "" + + echo "VDs" + kubectl get vd -n "${NAMESPACE}" + echo "" + + echo "login to master" + echo "os-release master" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'cat /etc/os-release' + echo "" + + echo "hostname master" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'hostname' + + - name: Generate nested kubeconfig + id: generate-kubeconfig + working-directory: test/dvp-over-dvp + env: + kubeConfigPath: tmp/kube.config + NAMESPACE: ${{ steps.vars.outputs.namespace }} + run: | + nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + + d8vscp() { + local source=$1 + local dest=$2 + d8 v scp -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + $source $dest + echo "d8vscp: $source -> $dest - done" + } + + d8vssh() { + local cmd=$1 + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c "$cmd" + } + + echo "Copy script for generating kubeconfig in nested cluster" + echo "Copy nested-sa-config/gen-sa.sh to master" + d8vscp "./nested-sa-config/gen-sa.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/gen-sa.sh" + echo "" + d8vscp "./tools/deckhouse-queue.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/deckhouse-queue.sh" + echo "" + + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'chmod +x /tmp/{gen-sa.sh,deckhouse-queue.sh}' + echo "" + + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'ls -la /tmp/' + echo "===" + + echo "Check d8 queue" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + 
${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'sudo /tmp/deckhouse-queue.sh' + + echo "Generate kube conf in nested cluster" + echo "run nested-sa-config/gen-sa.sh" + + # "Usage: gen-sa.sh [FILE_NAME]" + echo "===" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.$NAMESPACE \ + -c "sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}" + + echo "'sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}' - done" + echo "" + + echo "Copy kubeconfig to runner" + echo "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath} ./${kubeConfigPath}" + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath}" "./${kubeConfigPath}" + + echo "=== Set rights for kubeconfig ===" + echo "sudo chown 1001:1001 ${kubeConfigPath}" + sudo chown 1001:1001 ${kubeConfigPath} + echo "rights - done" + + echo "Kubeconf to github output" + CONFIG=$(cat ${kubeConfigPath} | base64 -w 0) + CONFIG=$(echo $CONFIG | base64 -w 0) + echo "config=$CONFIG" >> $GITHUB_OUTPUT + + - name: cloud-init logs + if: steps.dhctl-bootstrap.outcome == 'failure' + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + run: | + nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + + d8vscp() { + local source=$1 + local dest=$2 + d8 v scp -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + $source $dest + echo "d8vscp: $source -> $dest - done" + } + + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/var/log/cloud-init*.log" "./test/dvp-over-dvp/tmp/" + + - name: Prepare artifact + if: always() + run: | + sudo chown -fR 1001:1001 test/dvp-over-dvp + yq e '.deckhouse.registryDockerCfg = "None"' -i ./test/dvp-over-dvp/values.yaml + yq e 'select(.kind == "InitConfiguration") .deckhouse.registryDockerCfg = "None"' -i ./test/dvp-over-dvp/tmp/config.yaml + echo "${{ steps.generate-kubeconfig.outputs.config }}" | base64 -d | base64 -d > ./test/dvp-over-dvp/kube-config + + - name: Upload generated files + uses: actions/upload-artifact@v4 + id: artifact-upload + if: always() + with: + name: generated-files-${{ inputs.storage_type }} + path: | + test/dvp-over-dvp/tmp + test/dvp-over-dvp/values.yaml + overwrite: true + include-hidden-files: true + retention-days: 1 + + - name: Upload ssh config + uses: actions/upload-artifact@v4 + id: artifact-upload-ssh + if: always() + with: + name: generated-files-ssh-${{ inputs.storage_type }} + path: test/dvp-over-dvp/tmp/ssh + overwrite: true + include-hidden-files: true + retention-days: 1 + + - name: Upload kubeconfig config + uses: actions/upload-artifact@v4 + id: artifact-upload-kubeconfig + if: always() + with: + name: generated-files-kubeconfig-${{ inputs.storage_type }} + path: test/dvp-over-dvp/kube-config + overwrite: true + include-hidden-files: true + retention-days: 1 + + configure-storage: + name: Configure storage (${{ inputs.storage_type }}) + runs-on: ubuntu-latest + needs: bootstrap + steps: + - uses: actions/checkout@v4 + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup d8 + uses: ./.github/actions/install-d8 + - name: Install kubectl CLI + uses: azure/setup-kubectl@v4 + + - name: Check kubeconfig + run: | + mkdir -p ~/.kube + echo "Configure 
kube config" + echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config + + echo "Show paths and files content" + ls -la ~/.kube + chmod 600 ~/.kube/config + + echo "kubectl get nodes" + kubectl config use-context nested-e2e-nested-sa + kubectl get nodes + + - name: Configure replicated storage + if: ${{ inputs.storage_type == 'replicated' }} + working-directory: test/dvp-over-dvp/storage/sds-replicated + run: | + kubectl apply -f mc.yaml + echo "Wait for sds-node-configurator" + kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-node-configurator --timeout=300s + # echo "Wait for sds-replicated" + # kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-replicated-volume --timeout=300s + + for i in {1..60}; do + sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${sds_replicated_volume_status}" = "Active" ]]; then + echo "Namespaces sds-replicated-volume are Active" + kubectl -n d8-sds-replicated-volume get pods + break + fi + + echo "Waiting 10s for sds-replicated-volume to be ready" + echo "get ns" + kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" + + if (( i % 5 == 0 )); then + d8 p queue list | head -n25 || echo "No queues" + fi + sleep 10 + done + + echo "Wait bd" + workers=$(kubectl get nodes -o name | grep worker | wc -l) + bdexists=false + count=60 + for i in $(seq 1 $count); do + blockdevices=$(kubectl get blockdevice -o name | wc -l) + if [ $blockdevices -ge $workers ]; then + bdexists=true + break + fi + echo "Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" + d8 p queue list | head -n25 || echo "No queues" + sleep 10 + done + + if [ $bdexists = false ]; then + echo "Blockdevices is not 3" + echo "Show blockdevice" + kubectl get blockdevice + echo "Show sds namespaces" + kubectl get ns | grep sds || echo "ns sds is not found" + echo "Show cluster nodes" + kubectl get nodes + echo "Show deckhouse logs" + d8 p logs | tail -n 100 + exit 1 + fi + + chmod +x lvg-gen.sh + ./lvg-gen.sh + + chmod +x rsc-gen.sh + ./rsc-gen.sh + + echo "====== Show nested storageclasses =======" + kubectl get sc | grep nested || echo "No nested storageclasses" + echo "Done" + - name: Configure ceph storage + if: ${{ inputs.storage_type == 'ceph' }} + run: | + d8_queue_list() { + d8 p queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" + } + + d8_queue() { + local count=90 + local list_queue_ready=false + + for i in $(seq 1 $count) ; do + if [[ "$(d8_queue_list)" == "0" ]]; then + echo "Queue list is clear" + list_queue_ready=true + else + echo "Show queue list" + d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + fi + + if [[ "$list_queue_ready" = true ]]; then + break + fi + echo "====" + echo "Wait until queues are empty ${i}/${count}" + echo "====" + kubectl get ns | grep sds || echo "ns sds is not ready" + echo " " + sleep 10 + done + } + + cd test/dvp-over-dvp/storage/ceph + export registry=${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + yq e '.spec.registry.dockerCfg = env(registry)' -i 00-ms.yaml + unset registry + + echo "Create prod module source" + kubectl apply -f 00-ms.yaml + kubectl get ms + + echo "Create ceph operator and csi module config" + kubectl apply -f 01-mc.yaml + + d8_queue + + echo "Start wait for ceph operator and csi" + for i in {1..60}; do + ceph_operator_status=$(kubectl get ns d8-operator-ceph -o 
jsonpath='{.status.phase}' || echo "False") + csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${ceph_operator_status}" = "Active" ]] && [[ "${csi_ceph_status}" = "Ready" ]]; then + echo "Namespaces operator-ceph and csi are Active" + break + fi + + echo "Waiting 10s for ceph operator and csi namespaces to be ready" + echo "get ns" + kubectl get ns | grep ceph || echo "Namespaces operator-ceph and csi are not ready" + + if (( i % 5 == 0 )); then + echo "Show all ns" + kubectl get ns + echo "=====" + d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + fi + sleep 10 + done + + echo "Create sa" + kubectl apply -f 02-sa.yaml + echo "Create cm (patch existing for configure rbd support)" + kubectl apply -f 03-cm.yaml + echo "Create cluster" + kubectl apply -f 04-cluster.yaml + + echo "get pod in d8-operator-ceph" + kubectl -n d8-operator-ceph get po + + echo "Wait for ceph operator" + for i in {1..60}; do + echo "Check ceph pods, mon mgr osd" + ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) + ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) + ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) + + echo "check if ceph pods are ready" + if [[ "${ceph_mgr}" -ge "2" ]] && [[ "${ceph_mon}" -ge "3" ]] && [[ "${ceph_osd}" -ge "3" ]]; then + echo "Ceph cluster is ready" + break + fi + + echo "Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" + echo "Waiting 10s for ceph operator to be ready" + kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" + if (( i % 5 == 0 )); then + echo "= Get ceph ns =" + kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" + echo "= Get mc =" + kubectl get mc | grep ceph || echo "Failed to retrieve mc" + echo "= Get modules =" + kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" + echo "=====" + echo "Show queue" + d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + echo "=====" + fi + echo "====" + echo "Wait until all necessary pods are ready ${i}/60" + echo "====" + sleep 10 + done + + echo "Show pods" + kubectl get pods -n d8-operator-ceph + + kubectl apply -f 05-blockpool.yaml + kubectl apply -f 06-toolbox.yaml + echo "Wait for rook-ceph-tools, timeout 300s" + kubectl -n d8-operator-ceph wait --for=condition=Available deployment/rook-ceph-tools --timeout=300s + + echo "-- ls ceph pool --" + kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph osd pool ls + echo "------" + + echo "Configure storage class" + chmod +x ./ceph-configure.sh + ./ceph-configure.sh + + configure-virtualization: + name: Configure Virtualization (${{ inputs.storage_type }}) + runs-on: ubuntu-22.04 + needs: + - bootstrap + - configure-storage + steps: + - uses: actions/checkout@v4 + - name: Install kubectl CLI + uses: azure/setup-kubectl@v4 + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Check kubeconfig + run: | + echo "Configure kube config" + mkdir -p ~/.kube + echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config + chmod 600 ~/.kube/config + kubectl config use-context nested-e2e-nested-sa + + - name: Configure Virtualization + run: | + echo "Apply Virtualization module config" + kubectl apply -f -< ~/.kube/config + chmod 600 ~/.kube/config + kubectl config 
use-context nested-e2e-nested-sa + kubectl get vmclass + + - name: Download dependencies + working-directory: ./test/e2e/ + run: | + echo "Download dependencies" + go mod download + + - name: Create vmclass for e2e tests + run: | + kubectl get vmclass/generic -o json | jq 'del(.status) | del(.metadata) | .metadata = {"name":"generic-for-e2e","annotations":{"virtualmachineclass.virtualization.deckhouse.io/is-default-class":"true"}} ' | kubectl create -f - + + - name: Run E2E + id: e2e-tests + env: + TIMEOUT: ${{ inputs.e2e_timeout }} + working-directory: ./test/e2e/ + run: | + if [[ "${{ inputs.storage_type }}" == "replicated" ]]; then + export SKIP_IMMEDIATE_SC_CHECK="yes" + fi + STORAGE_CLASS_NAME=${{ inputs.nested_storageclass_name }} FOCUS="VirtualMachineConfiguration" task run:ci -v LABELS="Slow" + + # - uses: actions/upload-artifact@v4 + # if: always() + # with: + # name: resources_from_failed_tests_${{ inputs.storage_type }} + # path: ${{ runner.temp }}/e2e_failed__* + # if-no-files-found: ignore + + - name: Save results + working-directory: ./test/e2e/ + id: report + env: + input_storage_type: ${{ inputs.storage_type }} + if: always() + run: | + if [ -z "$SUMMARY" ]; then + SUMMARY=$(jq -n \ + --arg csi "$input_storage_type" \ + --arg date "$DATE" \ + --arg startTime "$START_TIME" \ + --arg branch "$GITHUB_REF_NAME" \ + --arg status ":question: UNKNOWN" \ + --arg link "$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID/" \ + '{ + CSI: $csi, + Date: $date, + StartTime: $startTime, + Branch: $branch, + Status: $status, + Link: $link + }' + ) + fi + echo $SUMMARY | jq + echo "summary=$SUMMARY" >> $GITHUB_OUTPUT + echo $SUMMARY > "e2e_summary_${{ inputs.storage_type }}_$DATE.json" + + - name: Upload summary test results + uses: actions/upload-artifact@v4 + id: e2e-summary-artifact + if: always() + with: + name: e2e_summary_${{ inputs.storage_type }}_${{ env.DATE }} + path: test/e2e/e2e_summary_${{ inputs.storage_type }}.json + if-no-files-found: ignore + + + undeploy-cluster: + name: Undeploy cluster (${{ inputs.storage_type }}) runs-on: ubuntu-latest + needs: + - bootstrap + - configure-storage + - configure-virtualization + - e2e-test + # if: always() + if: cancelled() || success() steps: - - name: Say hello - run: echo "Bootstrap workflow OK" + - uses: actions/checkout@v4 + + - name: Install htpasswd utility + run: | + sudo apt-get update + sudo apt-get install -y apache2-utils + + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Download artifacts + uses: actions/download-artifact@v5 + with: + name: generated-files-${{ inputs.storage_type }} + path: test/dvp-over-dvp/ + + - name: Configure kubectl via azure/k8s-set-context@v4 + uses: azure/k8s-set-context@v4 + with: + method: kubeconfig + context: e2e-cluster-nightly-e2e-virt-sa + kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + + - name: infra-undeploy + working-directory: test/dvp-over-dvp + run: | + task infra-undeploy diff --git a/test/dvp-over-dvp/Taskfile.yaml b/test/dvp-over-dvp/Taskfile.yaml index 13668e5e77..cdc348e487 100644 --- a/test/dvp-over-dvp/Taskfile.yaml +++ b/test/dvp-over-dvp/Taskfile.yaml @@ -75,7 +75,7 @@ tasks: cmds: - kubectl apply -f {{ .TMP_DIR }}/infra.yaml - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=jump-host --timeout=300s - - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=nfs-server --timeout=300s + # - 
kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=nfs-server --timeout=300s - | export end_time=$(date +%s) difference=$((end_time - {{.start_time}})) @@ -143,15 +143,16 @@ tasks: start_time: sh: date +%s JUMPHOST_EXT_IP: - sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short JUMPHOST_NODEPORT: - sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq ".spec.ports[] | select(.port==2222) | .nodePort" cmds: - | - docker run --pull=always -it \ + docker run --pull=always \ -v "{{ .TMP_DIR }}/config.yaml:/config.yaml" \ -v "{{ .SSH_DIR }}:/tmp/.ssh/" \ - dev-registry.deckhouse.io/sys/deckhouse-oss/install:main \ + -v "{{ .TMP_DIR }}/dhctl:/tmp/dhctl/" \ + dev-registry.deckhouse.io/sys/deckhouse-oss/install:{{ .DECKHOUSE_TAG }} \ dhctl bootstrap \ --config=/config.yaml \ --ssh-agent-private-keys=/tmp/.ssh/{{ .SSH_FILE_NAME }} \ @@ -225,3 +226,19 @@ tasks: - task: __ssh-command vars: CMD: TERM=xterm-256color sudo /usr/local/bin/k9s {{ .CLI_ARGS }} + + configure:cluster:sa: + desc: Configure kubeconfig for nested cluster + vars: + script: gen-sa.sh + cmds: + - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" ./nested-sa-config/{{ .script }} cloud@master-0:/tmp/ + - task: __ssh-command + vars: + CMD: sudo chmod +x /tmp/{{ .script }} + - task: __ssh-command + vars: + CMD: sudo /tmp/{{ .script }} + - task: __ssh-command + vars: + CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml index c9dbc7cf2b..fa7fd15e14 100644 --- a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml +++ b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml @@ -42,6 +42,28 @@ masterNodeGroup: etcdDisk: size: 15Gi storageClass: {{ .Values.storageClass }} +nodeGroups: +{{- range .Values.instances.additionalNodes }} + - name: {{ .name }} + replicas: {{ .count }} + instanceClass: + virtualMachine: + bootloader: {{ .bootloader }} + cpu: + cores: {{ .cores }} + coreFraction: {{ .coreFraction }} + memory: + size: {{ .memory }} + virtualMachineClassName: "{{ $.Values.namespace }}-cpu" + rootDisk: + size: 50Gi + image: + kind: VirtualImage + name: image + additionalDisks: + - size: 50Gi + storageClass: {{ $.Values.storageClass }} +{{- end }} provider: kubeconfigDataBase64: {{ .Values.kubeconfigDataBase64 }} namespace: {{ .Values.namespace }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml index 2887a2b168..2ae5da7f95 100644 --- a/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml +++ b/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml @@ -1,4 +1,5 @@ -{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" -}} +{{/* "local-path-provisioner" */}} +{{- $modules := list "upmeter" "pod-reloader" "secret-copier" "namespace-configurator" -}} {{ range $modules }} --- apiVersion: 
deckhouse.io/v1alpha1 diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml index dacae1acd3..369c2eb09f 100644 --- a/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml +++ b/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml @@ -15,11 +15,18 @@ kind: ModuleConfig metadata: name: global spec: - version: 1 + version: 2 settings: - defaultClusterStorageClass: nfs + {{- if .Values.defaultClusterStorageClass }} + defaultClusterStorageClass: {{ .Values.defaultClusterStorageClass }} + {{- end }} modules: publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.domain }}" + https: + certManager: + clusterIssuerName: selfsigned + # clusterIssuerName: letsencrypt-staging + mode: CertManager --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig @@ -44,6 +51,7 @@ metadata: name: user-authz spec: enabled: true + version: 1 --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml index d2b4ff4666..a14d46e181 100644 --- a/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml +++ b/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml @@ -1,3 +1,4 @@ +{{ if .Values.nfsEnabled }} --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig @@ -19,7 +20,7 @@ spec: apiVersion: storage.deckhouse.io/v1alpha1 kind: NFSStorageClass metadata: - name: nfs + name: {{ .Values.nfsSC }} spec: connection: host: "nfs-server.{{ .Values.namespace }}.svc.cluster.local" @@ -31,3 +32,4 @@ spec: retransmissions: 3 reclaimPolicy: Delete volumeBindingMode: Immediate +{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml index 3672dc8e79..b3006cc249 100644 --- a/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml +++ b/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml @@ -13,6 +13,23 @@ spec: --- apiVersion: deckhouse.io/v1alpha1 kind: NodeGroupConfiguration +metadata: + name: astra-d8-dm-modules.conf +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["astra", "ubuntu-lts", "debian"] + content: | + bb-sync-file /etc/modules-load.d/d8-dm-modules.conf - << "EOF" + dm_snapshot + dm_thin_pool + dm_cache + EOF + + systemctl restart systemd-modules-load.service +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration metadata: name: install-tools.sh spec: diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml deleted file mode 100644 index 4025e441b7..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/nodegroups.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{ range .Values.instances.additionalNodes }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: DVPInstanceClass -metadata: - name: {{ .name }} -spec: - virtualMachine: - virtualMachineClassName: "{{ $.Values.namespace }}-cpu" - cpu: - cores: {{ .cores }} - coreFraction: {{ .coreFraction }} - memory: - size: {{ .memory }} - bootloader: {{ $.Values.image.bootloader }} - rootDisk: - size: 50Gi - storageClass: {{ $.Values.storageClass }} - image: - kind: VirtualImage - name: image ---- -apiVersion: deckhouse.io/v1 -kind: NodeGroup -metadata: - name: {{ .name }} -spec: -{{- if eq .name "system" }} - nodeTemplate: - labels: - node-role.deckhouse.io/system: "" -{{- end }} - nodeType: {{ .nodeType | default "CloudEphemeral" }} - cloudInstances: 
- minPerZone: {{ .count }} - maxPerZone: {{ .count }} - classReference: - kind: DVPInstanceClass - name: {{ .name }} -{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml deleted file mode 100644 index c5a27c5749..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/virtualization.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: virtualization -spec: - enabled: true - settings: - dvcr: - storage: - persistentVolumeClaim: - size: 10Gi - # storageClassName: linstor-thin-r1 - type: PersistentVolumeClaim - virtualMachineCIDRs: - - 192.168.10.0/24 - version: 1 ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: virtualization -spec: - imageTag: {{ .Values.virtualization.tag }} - scanInterval: 15s diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml index e3b934ec9c..99573c35b2 100644 --- a/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml @@ -1,3 +1,4 @@ +{{ if .Values.nfsEnabled }} --- kind: Deployment apiVersion: apps/v1 @@ -40,3 +41,4 @@ spec: - name: nfs-data persistentVolumeClaim: claimName: nfs-data +{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml index e19ba3f190..430796d9b1 100644 --- a/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml @@ -1,3 +1,4 @@ +{{ if .Values.nfsEnabled }} --- apiVersion: v1 kind: PersistentVolumeClaim @@ -9,5 +10,6 @@ spec: - ReadWriteOnce resources: requests: - storage: 50Gi + storage: 10Gi storageClassName: {{ .Values.storageClass }} +{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml index 0aca8064da..a7e850a669 100644 --- a/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml @@ -1,3 +1,4 @@ +{{ if .Values.nfsEnabled }} --- kind: Service apiVersion: v1 @@ -17,3 +18,4 @@ spec: - name: udp-111 port: 111 protocol: UDP +{{ end }} diff --git a/test/dvp-over-dvp/nested-sa-config/gen-sa.sh b/test/dvp-over-dvp/nested-sa-config/gen-sa.sh new file mode 100644 index 0000000000..02e01b5e55 --- /dev/null +++ b/test/dvp-over-dvp/nested-sa-config/gen-sa.sh @@ -0,0 +1,184 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
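+
+# Usage (inferred from the argument checks below and the workflow step that
+# invokes this script; the names are illustrative):
+#   gen-sa.sh <SA_NAME> <CLUSTER_PREFIX> <CLUSTER_NAME> [FILE_NAME]
+# e.g.:
+#   gen-sa.sh nested-sa nested nested-e2e /tmp/kube.config
+# It applies a ServiceAccount, token Secret and ClusterAuthorizationRule in the
+# nested cluster and writes a kubeconfig for that ServiceAccount to FILE_NAME
+# (default: /tmp/kube.config).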
+ +get_current_date() { + date +"%H:%M:%S %d-%m-%Y" +} + +get_timestamp() { + date +%s +} + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +log_info() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${BLUE}[INFO]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [INFO] $message" >> "$LOG_FILE" + fi +} + +log_success() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${GREEN}[SUCCESS]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [SUCCESS] $message" >> "$LOG_FILE" + fi +} + +log_warning() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${YELLOW}[WARNING]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [WARNING] $message" >> "$LOG_FILE" + fi +} + +log_error() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${RED}[ERROR]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [ERROR] $message" >> "$LOG_FILE" + fi +} + +exit_trap() { + echo "" + log_info "Exiting..." + echo "" + exit 0 +} + +kubectl() { + sudo /opt/deckhouse/bin/kubectl $@ +} + +trap exit_trap SIGINT SIGTERM + + +SA_NAME=$1 +CLUSTER_PREFIX=$2 +CLUSTER_NAME=$3 +FILE_NAME=$4 + +if [[ -z "$SA_NAME" ]] || [[ -z "$CLUSTER_PREFIX" ]] || [[ -z "$CLUSTER_NAME" ]]; then + log_error "Usage: gen-sa.sh [FILE_NAME]" + exit 1 +fi + +if [[ -z "$FILE_NAME" ]]; then + FILE_NAME=/tmp/kube.config +fi + +SA_TOKEN=virt-${CLUSTER_PREFIX}-${SA_NAME}-token +SA_CAR_NAME=virt-${CLUSTER_PREFIX}-${SA_NAME} + +USER_NAME=${SA_NAME} +CONTEXT_NAME=${CLUSTER_NAME}-${USER_NAME} + +if kubectl cluster-info > /dev/null 2>&1; then + log_success "Access to Kubernetes cluster exists." +else + log_error "No access to Kubernetes cluster or configuration issue." 
+ exit 1 +fi + +sleep 2 +log_info "====" +log_info "Kubeconfig will be created successfully if you connected to k8s cluster via ssh tunnel or directly" +log_info "====" +sleep 2 + + +log_info "Apply SA, Secrets and ClusterAuthorizationRule" +kubectl apply -f -< /etc/ceph/ceph.conf + [global] + mon_host = $(sed 's/[a-z]=//g' /etc/rook/mon-endpoints) + EOF + + cat << EOF > /etc/ceph/ceph.client.admin.keyring + [$ROOK_CEPH_USERNAME] + key = $ROOK_CEPH_SECRET + EOF + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + key: ceph-username + name: rook-ceph-mon + - name: ROOK_CEPH_SECRET + valueFrom: + secretKeyRef: + key: ceph-secret + name: rook-ceph-mon + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - mountPath: /etc/rook + name: mon-endpoint-volume + containers: + - name: ceph-tools + command: + - sleep + - infinity + image: quay.io/ceph/ceph:v18.2.2 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + tty: true + workingDir: /var/lib/ceph + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - mountPath: /var/lib/ceph + name: homedir + securityContext: + runAsGroup: 167 + runAsNonRoot: true + runAsUser: 167 + volumes: + - name: mon-endpoint-volume + configMap: + defaultMode: 420 + items: + - key: data + path: mon-endpoints + name: rook-ceph-mon-endpoints + - name: ceph-config + emptyDir: {} + - name: homedir + emptyDir: {} diff --git a/test/dvp-over-dvp/storage/ceph/ceph-configure.sh b/test/dvp-over-dvp/storage/ceph/ceph-configure.sh new file mode 100644 index 0000000000..aad18a1bf5 --- /dev/null +++ b/test/dvp-over-dvp/storage/ceph/ceph-configure.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
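+
+# Summary (inferred from the body below; part of the generated manifest is not
+# visible in this patch view): creates a client.ceph-rbd-pool-r2 key via
+# `ceph auth get-or-create` in the rook-ceph-tools deployment, reads the
+# cluster fsid and monitor Service IPs, and applies a CephClusterConnection;
+# the workflow step that calls this script then relies on it to configure the
+# Ceph-backed storage class for the nested cluster.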
+ +ceph_user_pool=ceph-rbd-pool-r2 +echo "Use user $ceph_user_pool" +echo "Set permissions for user $ceph_user_pool (mgr 'allow *' mon 'allow *' osd 'allow *' mds 'allow *')" +usr=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- \ + ceph auth get-or-create client.$ceph_user_pool mon 'allow *' mgr 'allow *' osd "allow *") +echo "Get fsid" +fsid=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph fsid) + +userKey="${usr#*key = }" +ceph_monitors_ip=$(kubectl -n d8-operator-ceph get svc | grep mon | awk '{print $3}') +monitors_yaml=$( + for monitor_ip in $ceph_monitors_ip; do + echo " - $monitor_ip:6789" + done +) + +# Verify we have monitors +if [ -z "$monitors_yaml" ]; then + echo "ERROR: No Ceph monitors found" + exit 1 +fi + +echo "Create CephClusterConnection" +kubectl apply -f - <> "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: LVMVolumeGroup +metadata: + name: vg-data-${node_name}-${dev_path} +spec: + actualVGNameOnTheNode: vg-thin-data + type: Local + local: + nodeName: ${dev_node} + blockDeviceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - ${dev_name} + thinPools: + - name: thin-data + size: ${LVMVG_SIZE} + allocationLimit: 100% +EOF + +done + +kubectl apply -f "${manifest}" diff --git a/test/dvp-over-dvp/storage/sds-replicated/mc.yaml b/test/dvp-over-dvp/storage/sds-replicated/mc.yaml new file mode 100644 index 0000000000..b7d6abda99 --- /dev/null +++ b/test/dvp-over-dvp/storage/sds-replicated/mc.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-node-configurator +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-replicated-volume +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-node-configurator +spec: + imageTag: main + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-replicated-volume +spec: + imageTag: main + scanInterval: 15s diff --git a/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh b/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh new file mode 100644 index 0000000000..7d93443620 --- /dev/null +++ b/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
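+
+# Summary (inferred from the body below): builds a ReplicatedStoragePool from
+# the LVMVolumeGroups discovered via kubectl, creates the nested-thin-r2,
+# nested-thin-r1 and nested-thin-r1-immediate ReplicatedStorageClasses on top
+# of it, and patches the global ModuleConfig so nested-thin-r1 becomes the
+# default cluster storage class.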
+ +manifest=sds-rsp-rsc.yaml +replicatedStoragePoolName=thin-data + +pools=$(kubectl get lvmvolumegroup -o json | jq '.items[] | {name: .metadata.name, thinPoolName: .spec.thinPools[0].name}' -rc) + +cat << EOF > "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStoragePool +metadata: + name: $replicatedStoragePoolName +spec: + type: LVMThin + lvmVolumeGroups: +EOF + +for pool in ${pools}; do + vg_name=$(echo $pool | jq -r '.name'); + pool_node=$(echo $pool | jq -r '.thinPoolName'); + echo "${pool_node} ${vg_name}" +cat << EOF >> "${manifest}" + - name: ${vg_name} + thinPoolName: ${pool_node} +EOF +done + +cat << EOF >> "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r2 +spec: + replication: Availability + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: PreferablyLocal + topology: Ignored +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r1 +spec: + replication: None + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: PreferablyLocal + topology: Ignored +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r1-immediate +spec: + replication: None + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: Any + topology: Ignored +EOF + +kubectl apply -f ${manifest} + +DEFAULT_STORAGE_CLASS=nested-thin-r1 +kubectl patch mc global --type='json' -p='[{"op": "replace", "path": "/spec/settings/defaultClusterStorageClass", "value": "'"$DEFAULT_STORAGE_CLASS"'"}]' + +sleep 2 +echo "Showing Storage Classes" +kubectl get storageclass +echo " " diff --git a/test/dvp-over-dvp/tools/deckhouse-queue.sh b/test/dvp-over-dvp/tools/deckhouse-queue.sh new file mode 100644 index 0000000000..cada5c5a46 --- /dev/null +++ b/test/dvp-over-dvp/tools/deckhouse-queue.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
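+
+# Summary (inferred from the body below): waits, with retries, for the
+# deckhouse deployment in d8-system to become Available, then waits for its
+# task queues to drain; if Deckhouse never becomes ready the script dumps the
+# main queue and exits 1.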
+ +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +get_current_date() { + date +"%H:%M:%S %d-%m-%Y" +} + +get_timestamp() { + date +%s +} + +log_info() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${BLUE}[INFO]${NC} $message" +} + +log_success() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${GREEN}[SUCCESS]${NC} $message" +} + +log_warning() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${YELLOW}[WARNING]${NC} $message" +} + +log_error() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${RED}[ERROR]${NC} $message" +} + +kubectl() { + /opt/deckhouse/bin/kubectl $@ + # sudo /opt/deckhouse/bin/kubectl $@ +} + +d8() { + /opt/deckhouse/bin/d8 $@ + # sudo /opt/deckhouse/bin/d8 $@ +} + + +d8_queue_main() { + echo "$( d8 p queue main | grep -Po '(?<=length )([0-9]+)' )" +} + +d8_queue_list() { + d8 p queue list | grep -Po '([0-9]+)(?= active)' +} + +d8_queue() { + local count=90 + # local main_queue_ready=false + local list_queue_ready=false + + for i in $(seq 1 $count) ; do + # if [ $(d8_queue_main) == "0" ]; then + # echo "main queue is clear" + # main_queue_ready=true + # else + # echo "Show main queue" + # d8 p queue main | head -n25 || echo "Failed to retrieve main queue" + # fi + + if [ $(d8_queue_list) == "0" ]; then + echo "list queue list is clear" + list_queue_ready=true + else + echo "Show queue list" + d8 p queue list | head -n25 || echo "Failed to retrieve queue" + fi + + if [ "$list_queue_ready" = true ]; then + # if [ "$main_queue_ready" = true ] && [ "$list_queue_ready" = true ]; then + break + fi + echo "Wait until queues are empty ${i}/${count}" + sleep 10 + done +} + +d8_ready() { + local ready=false + local count=60 + common_start_time=$(get_timestamp) + for i in $(seq 1 $count) ; do + start_time=$(get_timestamp) + if kubectl -n d8-system wait deploy/deckhouse --for condition=available --timeout=20s 2>/dev/null; then + ready=true + break + fi + end_time=$(get_timestamp) + difference=$((end_time - start_time)) + log_info "Wait until deckhouse is ready ${i}/${count} after ${difference}s" + if (( i % 5 == 0 )); then + kubectl -n d8-system get pods + d8 p queue list | head -n25 || echo "Failed to retrieve queue" + fi + done + + if [ "$ready" = true ]; then + log_success "Deckhouse is Ready!" 
+ echo "Checking queues" + d8_queue + else + common_end_time=$(get_timestamp) + common_difference=$((common_end_time - common_start_time)) + common_formatted_difference=$(date -u +'%H:%M:%S' -d "@$common_difference") + log_error "Deckhouse is not ready after ${count} attempts and ${common_formatted_difference} time, check its queue for errors:" + d8 p queue main | head -n25 + exit 1 + fi +} + +start_time=$(get_timestamp) +log_info "Checking that deckhouse is ready" +d8_ready +end_time=$(get_timestamp) +difference=$((end_time - start_time)) +log_success "Deckhouse is ready after $(date -ud "@$difference" +'%H:%M:%S')" diff --git a/test/dvp-over-dvp/values.example.yaml b/test/dvp-over-dvp/values.example.yaml deleted file mode 100644 index a25055d26d..0000000000 --- a/test/dvp-over-dvp/values.example.yaml +++ /dev/null @@ -1,33 +0,0 @@ -storageClass: rv-thin-r1 -namespace: kek -clusterConfigurationPrefix: demo-cluster -sa: dkp-sa -deckhouse: - tag: main - kubernetesVersion: Automatic - registryDockerCfg: # <-- Put license key here -virtualization: - tag: main -image: - url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img - defaultUser: ubuntu - bootloader: EFI -ingressHosts: - - api - - grafana - - dex - - prometheus - - console - - virtualization -instances: - masterNodes: - count: 1 - cores: 4 - coreFraction: 50% - memory: 14Gi - additionalNodes: - - name: worker - count: 3 - cores: 4 - coreFraction: 50% - memory: 12Gi diff --git a/test/e2e/scripts/task_run_ci.sh b/test/e2e/scripts/task_run_ci.sh index 9a3e7a8457..276b961ed6 100755 --- a/test/e2e/scripts/task_run_ci.sh +++ b/test/e2e/scripts/task_run_ci.sh @@ -21,7 +21,12 @@ echo "DATE=$DATE" >> $GITHUB_ENV START_TIME=$(date +"%H:%M:%S") echo "START_TIME=$START_TIME" >> $GITHUB_ENV -go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT +if [[ -n $FOCUS ]];then + go tool ginkgo --focus "$FOCUS" -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT +else + go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT +fi + EXIT_CODE="${PIPESTATUS[0]}" RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!") if [[ $RESULT == FAIL!* || $EXIT_CODE -ne "0" ]]; then From a9ce2e503ce0eba9aedce48359300eb5f14e83bd Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 12 Dec 2025 17:25:35 +0300 Subject: [PATCH 05/71] add additional check Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 07674a1a29..3ca5528e07 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -722,8 +722,15 @@ jobs: sleep 10 done } - + echo "Checking virtualization module is on" + if [ "$(kubectl get mc virtualization -o jsonpath='{.spec.enabled}')" != "true" ]; then + echo "Virtualization module is not enabled" + echo "Enabling virtualization module" + kubectl patch mc virtualization -p '{"spec":{"enabled": true}}' --type merge + fi + d8_queue + # kubectl -n d8-virtualization get pods || echo "ns virtualization is not ready" for i in {1..60}; do From 880990e4c44ecc459a30a8f42f7d626d7a3a6972 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 15 Dec 2025 20:29:30 +0300 Subject: [PATCH 06/71] add statis-cluster config Signed-off-by: Nikita Korolev --- test/dvp-static-cluster/Taskfile.yaml | 367 ++++++++++++++++++ 
.../charts/cluster-config/Chart.yaml | 24 ++ .../cluster-config/templates/_helpers.tpl | 5 + .../templates/cluster-config.yaml | 26 ++ .../templates/disabled-modules.yaml | 11 + .../templates/master-nodes.yaml | 46 +++ .../cluster-config/templates/modules-cse.yaml | 18 + .../templates/modules-dvp-base.yaml | 261 +++++++++++++ .../templates/modules-minimal.yaml | 74 ++++ .../charts/cluster-config/templates/nfs.yaml | 43 ++ .../charts/cluster-config/templates/ngc.yaml | 54 +++ .../cluster-config/templates/nodes.yaml | 45 +++ .../cluster-config/templates/ssh-creds.yaml | 8 + .../templates/virtualization.yaml | 31 ++ .../charts/infra/.helmignore | 23 ++ .../charts/infra/Chart.yaml | 24 ++ .../charts/infra/templates/_helpers.tpl | 123 ++++++ .../charts/infra/templates/ingress.yaml | 74 ++++ .../infra/templates/jump-host/deploy.yaml | 38 ++ .../infra/templates/jump-host/ingress.yaml | 40 ++ .../charts/infra/templates/jump-host/svc.yaml | 29 ++ .../charts/infra/templates/ns.yaml | 4 + .../charts/infra/templates/vi.yaml | 12 + .../charts/infra/templates/vmc.yaml | 7 + .../charts/infra/templates/vms.yaml | 12 + .../nested-sa-config/gen-sa.sh | 184 +++++++++ .../storage/ceph/00-ms.yaml | 10 + .../storage/ceph/01-mc.yaml | 36 ++ .../storage/ceph/02-sa.yaml | 74 ++++ .../storage/ceph/03-cm.yaml | 245 ++++++++++++ .../storage/ceph/04-cluster.yaml | 111 ++++++ .../storage/ceph/05-blockpool.yaml | 13 + .../storage/ceph/06-toolbox.yaml | 83 ++++ .../storage/ceph/ceph-configure.sh | 85 ++++ .../storage/sds-replicated/lvg-gen.sh | 57 +++ .../storage/sds-replicated/mc.yaml | 32 ++ .../storage/sds-replicated/rsc-gen.sh | 87 +++++ 37 files changed, 2416 insertions(+) create mode 100644 test/dvp-static-cluster/Taskfile.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/Chart.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/_helpers.tpl create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/ssh-creds.yaml create mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml create mode 100644 test/dvp-static-cluster/charts/infra/.helmignore create mode 100644 test/dvp-static-cluster/charts/infra/Chart.yaml create mode 100644 test/dvp-static-cluster/charts/infra/templates/_helpers.tpl create mode 100644 test/dvp-static-cluster/charts/infra/templates/ingress.yaml create mode 100644 test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml create mode 100644 test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml create mode 100644 test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml create mode 100644 test/dvp-static-cluster/charts/infra/templates/ns.yaml create mode 100644 
test/dvp-static-cluster/charts/infra/templates/vi.yaml create mode 100644 test/dvp-static-cluster/charts/infra/templates/vmc.yaml create mode 100644 test/dvp-static-cluster/charts/infra/templates/vms.yaml create mode 100644 test/dvp-static-cluster/nested-sa-config/gen-sa.sh create mode 100644 test/dvp-static-cluster/storage/ceph/00-ms.yaml create mode 100644 test/dvp-static-cluster/storage/ceph/01-mc.yaml create mode 100644 test/dvp-static-cluster/storage/ceph/02-sa.yaml create mode 100644 test/dvp-static-cluster/storage/ceph/03-cm.yaml create mode 100644 test/dvp-static-cluster/storage/ceph/04-cluster.yaml create mode 100644 test/dvp-static-cluster/storage/ceph/05-blockpool.yaml create mode 100644 test/dvp-static-cluster/storage/ceph/06-toolbox.yaml create mode 100644 test/dvp-static-cluster/storage/ceph/ceph-configure.sh create mode 100755 test/dvp-static-cluster/storage/sds-replicated/lvg-gen.sh create mode 100644 test/dvp-static-cluster/storage/sds-replicated/mc.yaml create mode 100644 test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml new file mode 100644 index 0000000000..c73e1e3a70 --- /dev/null +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -0,0 +1,367 @@ +# https://taskfile.dev + +version: "3" + + +includes: + vm: + taskfile: Taskfile.vm.yaml + +vars: + NAMESPACE: + sh: yq eval '.namespace' values.yaml + D8_TAG: + sh: yq eval '.deckhouse.tag' values.yaml + TMP_DIR: ./tmp + SSH_DIR: "{{ .TMP_DIR }}/ssh" + SSH_FILE_NAME: cloud + SSH_PUB_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.pub" + SSH_PRIV_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}" + DISCOVERED_VALUES_FILE: tmp/discovered-values.yaml + PASSWORD_FILE: "{{ .TMP_DIR }}/password.txt" + PASSWORD_HASH_FILE: "{{ .TMP_DIR }}/password-hash.txt" +tasks: + create-tmp-dir: + desc: Preflight / Create tmp dir + cmds: + - mkdir -p "{{ .TMP_DIR }}" + + ssh-gen: + desc: Preflight / Generate ssh keypair for jump-host + deps: + - create-tmp-dir + cmds: + - mkdir -p "{{ .SSH_DIR }}" + - yes | ssh-keygen -t ed25519 -b 1024 -f {{ .SSH_PRIV_KEY_FILE }} -N "" -C "cloud" -v + - chmod 0600 "{{ .SSH_PUB_KEY_FILE }}" + - chmod 0400 "{{ .SSH_PRIV_KEY_FILE }}" + status: + - test -f "{{ .SSH_PRIV_KEY_FILE }}" + + password-gen: + desc: Preflight / Generate password + cmds: + - date +%s | sha256sum | base64 | head -c 10 > {{ .PASSWORD_FILE }} + - | + echo $(cat {{ .TMP_DIR }}/password.txt) | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 > {{ .PASSWORD_HASH_FILE }} + status: + - test -f "{{ .PASSWORD_FILE }}" + - test -f "{{ .PASSWORD_HASH_FILE }}" + + + generate-helm-values: + desc: Generate helm values + deps: + - ssh-gen + - password-gen + - create-tmp-dir + cmds: + - touch {{ .DISCOVERED_VALUES_FILE }} + - | + export SSH_PUB_KEY="$(cat {{ .SSH_PUB_KEY_FILE }})" + yq eval --inplace '.discovered.publicSSHKey = env(SSH_PUB_KEY)' {{ .DISCOVERED_VALUES_FILE }} + - | + export SSH_PRIV_KEY_B64="$(cat {{ .SSH_PRIV_KEY_FILE }} | base64 -w 0)" + yq eval --inplace '.discovered.privateSSHKeyBase64 = env(SSH_PRIV_KEY_B64)' {{ .DISCOVERED_VALUES_FILE }} + - | + export DOMAIN=$(kubectl get mc global -o json | jq '.spec.settings.modules.publicDomainTemplate | split(".")[1:] | join(".")' -rc) + yq eval --inplace '.discovered.domain = env(DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} + - | + export CLUSTER_DOMAIN=$(kubectl -n d8-system exec -it svc/deckhouse-leader -- deckhouse-controller global values -o json | jq -rc .clusterConfiguration.clusterDomain) + yq eval --inplace 
'.discovered.clusterDomain = env(CLUSTER_DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} + - | + if kubectl -n {{ .NAMESPACE }} get vm -o name 2>/dev/null | grep -q .; then + export VM_IPS=$(kubectl -n {{ .NAMESPACE }} get vm -o json | jq -r '[.items[] | select(.status.ipAddress != null) | .metadata.name + ": " + .status.ipAddress] | join("\n")') + yq eval --inplace '.discovered.vmIPs = env(VM_IPS)' {{ .DISCOVERED_VALUES_FILE }} + else + yq eval --inplace '.discovered.vmIPs = {}' {{ .DISCOVERED_VALUES_FILE }} + fi + - | + export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" + yq eval --inplace '.discovered.passwordHash = env(PASSWORD_HASH)' {{ .DISCOVERED_VALUES_FILE }} + + render-infra: + desc: Preparation / Generate infra manifests + deps: + - ssh-gen + - generate-helm-values + cmds: + - helm template static-dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} > {{ .TMP_DIR }}/infra.yaml + + infra-deploy: + deps: + - render-infra + desc: Deploy infra (Namespace/RBAC/Jumphost) + vars: + start_time: + sh: date +%s + cmds: + - kubectl apply -f {{ .TMP_DIR }}/infra.yaml + - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=jump-host --timeout=300s + - kubectl -n {{ .NAMESPACE }} get vi -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Ready' --timeout=600s + - kubectl -n {{ .NAMESPACE }} get vd -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Ready' --timeout=600s + - kubectl -n {{ .NAMESPACE }} get vm -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Running' --timeout=600s + - kubectl -n {{ .NAMESPACE }} get vm -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.conditions[?(@.type=="AgentReady")].status}=True' --timeout=300s + - | + export end_time=$(date +%s) + difference=$((end_time - {{.start_time}})) + date -ud "@$difference" +'%H:%M:%S' + + infra-undeploy: + desc: Destroy infra + aliases: + - uninstall + prompt: This command will destroy current infra... Do you want to continue? 
+ cmds: + - kubectl delete -f {{ .TMP_DIR }}/infra.yaml || true + - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true + + + render-cluster-config: + desc: Preparation / Generate cluster config (infra required) + deps: + - ssh-gen + - generate-helm-values + cmds: + - helm template dvp-over-static-dvp-cluster-config ./charts/cluster-config -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml + + render-cluster-manifests: + desc: Preparation / Generate cluster config without cluster bootstrap configs (infra required) + deps: + - render-cluster-config + cmds: + - yq 'select( (.apiVersion + "/" + .kind) != "deckhouse.io/v1/InitConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/ClusterConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/StaticClusterConfiguration" )' {{ .TMP_DIR }}/config.yaml > {{ .TMP_DIR }}/config-manifests.yaml + + render-all: + desc: Generate all manifests + cmds: + - task render-infra + - task render-cluster-config + - task render-cluster-manifests + + update-cluster: + desc: Update cluster + deps: + - render-cluster-manifests + cmds: + - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" {{ .TMP_DIR }}/config-manifests.yaml cloud@master-0:/tmp/config-manifests.yaml + - task: __ssh-command + vars: + CMD: sudo /opt/deckhouse/bin/kubectl apply -f /tmp/config-manifests.yaml + + dhctl-bootstrap: + desc: Bootstrap DKP over DVP + deps: + - render-cluster-config + vars: + DeckhouseInstallImage: + sh: | + if $(yq eval '.cse' values.yaml); then + echo "dev-registry-cse.deckhouse.ru/sys/deckhouse-cse/install" + else + echo "dev-registry.deckhouse.io/sys/deckhouse-oss/install" + fi + start_time: + sh: date +%s + JUMPHOST_EXT_IP: + sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + JUMPHOST_NODEPORT: + sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + MASTER_NODE_IP: + sh: kubectl -n {{ .NAMESPACE }} get vm master-0 -o jsonpath="{.status.ipAddress}" + cmds: + - | + docker run --pull=always -it \ + -v "{{ .TMP_DIR }}/config.yaml:/config.yaml" \ + -v "{{ .SSH_DIR }}:/tmp/.ssh/" \ + {{ .DeckhouseInstallImage }}:{{ .D8_TAG }} \ + dhctl bootstrap \ + --config=/config.yaml \ + --ssh-agent-private-keys=/tmp/.ssh/{{ .SSH_FILE_NAME }} \ + --ssh-host={{ .MASTER_NODE_IP }} \ + --ssh-user=cloud \ + --ssh-bastion-port={{ .JUMPHOST_NODEPORT }} \ + --ssh-bastion-host={{ .JUMPHOST_EXT_IP }} \ + --ssh-bastion-user=user \ + {{.CLI_ARGS}} + - | + export end_time=$(date +%s) + difference=$((end_time - {{.start_time}})) + date -ud "@$difference" +'%H:%M:%S' + + show-connection-info: + desc: Show connection info + vars: + DOMAIN: + sh: yq eval '.discovered.domain' {{ .DISCOVERED_VALUES_FILE }} + PASSWORD: + sh: cat {{ .PASSWORD_FILE }} + JUMPHOST_EXT_IP: + sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + JUMPHOST_NODEPORT: + sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + MASTER_NODE_NAME: + sh: kubectl get node -l node.deckhouse.io/group=master -o jsonpath="{.items[0].metadata.name}" + + silent: true + cmds: + - echo "Connect to master task ssh-to-master" + - | + echo "Host cluster master node: {{ .MASTER_NODE_NAME }}" + echo "Host 
cluster grafana URL: https://grafana.{{ .DOMAIN }}" + echo "Namespace: {{ .NAMESPACE }}" + echo "ssh-pub key:" + cat {{ .SSH_PUB_KEY_FILE }} + echo "ssh-priv key:" + cat {{ .SSH_PRIV_KEY_FILE }} + echo "OS User: cloud" + echo "Bastion: user@{{ .JUMPHOST_EXT_IP }}:{{ .JUMPHOST_NODEPORT }}" + echo vms: + kubectl -n {{ .NAMESPACE }} get vm + echo "Grafana URL https://grafana.{{ .NAMESPACE }}.{{ .DOMAIN }}" + echo "Default user/password admin@deckhouse.io/{{ .PASSWORD}}" + + install: + cmds: + - task: infra-deploy + - task: dhctl-bootstrap + - task: show-connection-info + + kill-dvp-resources: + cmds: + - kubectl -n {{ .NAMESPACE }} delete vm --all --force --grace-period=0 + - kubectl -n {{ .NAMESPACE }} delete vd --all --force --grace-period=0 + + ssh-to-master: + cmds: + - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' cloud@master-0 + + ssh-to-worker: + cmds: + - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true worker-0.{{ .NAMESPACE }} 22' cloud@worker-0 + + ssh-to-master-via-jumphost: + vars: + SSH_AGENT_SOCK: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.sock" + JUMPHOST_EXT_IP: + sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + JUMPHOST_NODEPORT: + sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + MASTER_NODE_IP: + sh: kubectl -n {{ .NAMESPACE }} get vm master-0 -o jsonpath="{.status.ipAddress}" + cmds: + # kill ssh-agent + - ps aux | grep '{{ .SSH_AGENT_SOCK }}' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true + # remove ssh-agent sock + - rm -rf {{ .SSH_AGENT_SOCK }} || true + # create temp ssh-agent + - eval $(ssh-agent -a {{ .SSH_AGENT_SOCK }}) + # add ssh key to agent + - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add {{ .SSH_PRIV_KEY_FILE }} + # check ssh-agent + - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add -l + # connect to master via jumphost + - | + SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} \ + /usr/bin/ssh \ + -A -vv \ + -J user@{{ .JUMPHOST_EXT_IP }}:{{ .JUMPHOST_NODEPORT }} \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + cloud@{{ .MASTER_NODE_IP}} + + ssh-to-master-via-ws: + vars: + SSH_AGENT_SOCK: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.sock" + DOMAIN: + sh: yq eval '.discovered.domain' {{ .DISCOVERED_VALUES_FILE }} + MASTER_NODE_IP: + sh: kubectl -n {{ .NAMESPACE }} get vm master-0 -o jsonpath="{.status.ipAddress}" + cmds: + # kill wstunnel + - ps aux | grep wstunnel | grep tcp://9999:127.0.0.1:2222 | awk '{print $2}' | xargs -r kill -9 || true + # start wstunnel + - wstunnel client -L tcp://9999:127.0.0.1:2222 wss://ws.{{ .NAMESPACE }}.{{ .DOMAIN }}:443 & + # kill ssh-agent + - ps aux | grep '{{ .SSH_AGENT_SOCK }}' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true + # remove ssh-agent sock + - rm -rf {{ .SSH_AGENT_SOCK }} || true + # create temp ssh-agent + - eval $(ssh-agent -a {{ .SSH_AGENT_SOCK }}) + # add ssh key to agent + - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add {{ .SSH_PRIV_KEY_FILE }} + # check ssh-agent + - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add -l + # # remove known_hosts entry + - ssh-keygen -f "${HOME}/.ssh/known_hosts" -R "[127.0.0.1]:9999" + # connect to master via jumphost + - | + SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} \ + /usr/bin/ssh \ + -A \ + -J 
user@127.0.0.1:9999 \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + cloud@{{ .MASTER_NODE_IP}} + + clean: + cmds: + - task: infra-undeploy + - rm -rf "{{ .TMP_DIR }}" + + __ssh-command: + silent: true + internal: true + cmds: + - /usr/bin/ssh -t -i {{ .SSH_PRIV_KEY_FILE }} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' cloud@master-0 {{ .CMD }} + + kubectl: + desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" + cmds: + - task: __ssh-command + vars: + CMD: sudo /opt/deckhouse/bin/kubectl {{ .CLI_ARGS }} + + k9s: + desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" + cmds: + - task: __ssh-command + vars: + CMD: TERM=xterm-256color sudo /usr/local/bin/k9s {{ .CLI_ARGS }} + + configure:storage:sds-lvg: + desc: Copy storage manifests to master + vars: + script: gen-lvg.sh + config: /tmp/sds-local-lvg.yaml + cmds: + - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} cloud@master-0:/tmp/ + - task: __ssh-command + vars: + CMD: sudo chmod +x /tmp/{{ .script }} + - task: __ssh-command + vars: + CMD: sudo /tmp/{{ .script }} + - task: __ssh-command + vars: + CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }} + + configure:storage:local-sc: + desc: Copy storage manifests to master + vars: + script: gen-sc.sh + config: /tmp/sds-local-sc + cmds: + - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} cloud@master-0:/tmp/ + - task: __ssh-command + vars: + CMD: sudo chmod +x /tmp/{{ .script }} + - task: __ssh-command + vars: + CMD: sudo /tmp/{{ .script }} + - task: __ssh-command + vars: + CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }}.yaml + - task: __ssh-command + vars: + CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }}-multi.yaml diff --git a/test/dvp-static-cluster/charts/cluster-config/Chart.yaml b/test/dvp-static-cluster/charts/cluster-config/Chart.yaml new file mode 100644 index 0000000000..c61a43f29a --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: cluster-config +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. 
They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/_helpers.tpl b/test/dvp-static-cluster/charts/cluster-config/templates/_helpers.tpl new file mode 100644 index 0000000000..8c39b8c503 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/_helpers.tpl @@ -0,0 +1,5 @@ +{{- define "cluster-config.full-svc-address" -}} +{{- $ctx := index . 0 -}} +{{- $name := index . 1 -}} +{{ $name }}.{{ $ctx.Values.namespace }}.svc.{{ $ctx.Values.discovered.clusterDomain }} +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml new file mode 100644 index 0000000000..e3587ba75d --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: deckhouse.io/v1 +kind: ClusterConfiguration +clusterType: Static +podSubnetCIDR: 10.88.0.0/16 +podSubnetNodeCIDRPrefix: '24' +serviceSubnetCIDR: 10.99.0.0/16 +kubernetesVersion: {{ .Values.deckhouse.kubernetesVersion | quote }} +clusterDomain: "internal.{{ .Values.discovered.clusterDomain }}" +defaultCRI: ContainerdV2 +--- +apiVersion: deckhouse.io/v1 +kind: InitConfiguration +deckhouse: + {{- if .Values.cse }} + imagesRepo: dev-registry-cse.deckhouse.ru/sys/deckhouse-cse + {{- else }} + imagesRepo: dev-registry.deckhouse.io/sys/deckhouse-oss + {{- end }} + registryDockerCfg: {{ .Values.deckhouse.registryDockerCfg }} + devBranch: {{ .Values.deckhouse.tag }} +--- +apiVersion: deckhouse.io/v1 +kind: StaticClusterConfiguration +internalNetworkCIDRs: + - 10.66.0.0/16 diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml new file mode 100644 index 0000000000..0162e92748 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml @@ -0,0 +1,11 @@ +{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" "dashboard" -}} + +{{- range $modules }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: {{ . 
}} +spec: + enabled: false +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml new file mode 100644 index 0000000000..5c2b0b1ee9 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml @@ -0,0 +1,46 @@ + +{{- $totalNodes := 0 -}} +{{- range .Values.instances.additionalNodes -}} + {{- $totalNodes = add $totalNodes .count -}} +{{- end -}} + +--- +apiVersion: deckhouse.io/v1 +kind: NodeGroup +metadata: + name: master +spec: + disruptions: + approvalMode: Manual + kubelet: + containerLogMaxFiles: 4 + containerLogMaxSize: 50Mi + nodeTemplate: + {{- if eq $totalNodes 0 }} + taints: [] + {{- end }} + labels: + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + nodeType: Static + staticInstances: + count: {{ .Values.instances.masterNodes.count }} + labelSelector: + matchLabels: + role: master + +{{range $_, $i := untilStep 0 (.Values.instances.masterNodes.count | int) 1}} + {{ $vmName := printf "master-%d" $i }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: StaticInstance +metadata: + name: {{ $vmName }} + labels: + role: master +spec: + address: {{ index $.Values.discovered.vmIPs $vmName }} + credentialsRef: + kind: SSHCredentials + name: mvp-static +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml new file mode 100644 index 0000000000..0e2be95fba --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml @@ -0,0 +1,18 @@ +{{ $enabled := false }} +{{- if .Values.cse }} +{{- $enabled = true }} +{{- end }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: multitenancy-manager +spec: + enabled: {{ $enabled }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: gost-integrity-controller +spec: + enabled: {{ $enabled }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml new file mode 100644 index 0000000000..d1bd57d44b --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml @@ -0,0 +1,261 @@ +{{- $totalNodes := .Values.instances.masterNodes.count -}} +{{- range .Values.instances.additionalNodes -}} + {{- $totalNodes = add $totalNodes .count -}} +{{- end -}} + +{{- if eq .Values.deckhouse.bundle "Default" }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authn +spec: + version: 2 + enabled: true + settings: + controlPlaneConfigurator: + dexCAMode: DoNotNeed + publishAPI: + enabled: true + https: + mode: SelfSigned + global: + kubeconfigGeneratorMasterCA: "" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authz +spec: + enabled: true + settings: + enableMultiTenancy: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: descheduler +spec: + enabled: {{ if eq $totalNodes 1 }}false{{ else }}true{{ end }} + +################################################################## +## observability +################################################################## +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: prometheus +spec: + version: 1 + enabled: true + settings: + retentionDays: 7 + # storageClass: 
i-linstor-thin-r1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: prompp +spec: + version: 1 + enabled: true +{{- if not .Values.cse }} +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: prompp +spec: + imageTag: stable + scanInterval: 15s +{{- end }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: monitoring-applications +spec: + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: node-local-dns +spec: + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: log-shipper +spec: + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: loki +spec: + settings: + # storageClass: i-linstor-thin-r1 + diskSizeGigabytes: 50 + retentionPeriodHours: 24 + storeSystemLogs: false + enabled: true + version: 1 + +################################################################## +## storage +################################################################## + +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: snapshot-controller +spec: + enabled: true + version: 1 +{{- if not .Values.cse }} +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: snapshot-controller +spec: + imageTag: main + rollback: false + scanInterval: 10m0s +{{- end }} + +{{ if or .Values.modules.sdsLocalVolumeEnabled .Values.modules.sdsReplicatedVolumeEnabled }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-node-configurator +spec: + version: 1 + enabled: true +{{- if not .Values.cse }} +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-node-configurator +spec: + imageTag: main + scanInterval: 15s +{{- end }} +{{ end }} + +{{ if .Values.modules.sdsLocalVolumeEnabled }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-local-volume +spec: + version: 1 + enabled: true +{{- if not .Values.cse }} +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-local-volume +spec: + imageTag: main + scanInterval: 15s +{{- end }} +{{ end }} + +{{ if .Values.modules.sdsReplicatedVolumeEnabled }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-replicated-volume +spec: + version: 1 + enabled: true +{{- if not .Values.cse }} +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-replicated-volume +spec: + imageTag: main + scanInterval: 15s +{{- end }} +{{ end }} + +################################################################## +## ingress +################################################################## +--- +apiVersion: deckhouse.io/v1 +kind: IngressNginxController +metadata: + name: main +spec: + inlet: HostPort + enableIstioSidecar: true + ingressClass: nginx + hostPort: + httpPort: 80 + httpsPort: 443 + nodeSelector: + node-role.kubernetes.io/master: '' + tolerations: + - effect: NoSchedule + operator: Exists + +################################################################## +## rbac +################################################################## +--- +apiVersion: deckhouse.io/v1 +kind: ClusterAuthorizationRule +metadata: + name: admin +spec: + subjects: + - kind: User + name: admin@deckhouse.io + accessLevel: SuperAdmin + portForwarding: true +--- +apiVersion: deckhouse.io/v1 +kind: User +metadata: + name: admin +spec: + email: admin@deckhouse.io + # echo 
"t3chn0l0gi4" | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 + password: {{ .Values.discovered.passwordHash }} + +################################################################## +## console +################################################################## +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: console +spec: + enabled: true +{{- if not .Values.cse }} +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: console +spec: + imageTag: master + scanInterval: 15s +{{- end }} +{{ end -}} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml new file mode 100644 index 0000000000..20410d13f1 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: deckhouse +spec: + version: 1 + enabled: true + settings: +{{- if .Values.cse }} + allowExperimentalModules: true +{{- end }} + bundle: {{ .Values.deckhouse.bundle }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: global +spec: + version: 2 + settings: + modules: + publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" + defaultClusterStorageClass: nfs +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: cni-cilium +spec: + version: 1 + enabled: true + settings: + tunnelMode: VXLAN +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: node-manager +spec: + enabled: true + version: 2 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: kube-dns +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: registry-packages-proxy +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: control-plane-manager +spec: + enabled: true +{{- if .Values.cse }} + settings: + apiserver: + auditPolicyEnabled: true + signature: Enforce + etcd: + maxDbSize: 6442450944 +{{- end }} + version: 2 diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml new file mode 100644 index 0000000000..138069b3f3 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml @@ -0,0 +1,43 @@ +{{- if eq .Values.deckhouse.bundle "Default" }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: csi-nfs +spec: + enabled: true + source: deckhouse + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: csi-nfs +spec: + imageTag: main + scanInterval: 10m +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: snapshot-controller +spec: + enabled: true + version: 1 +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: NFSStorageClass +metadata: + name: nfs +spec: + connection: + host: {{ include "cluster-config.full-svc-address" (list $ "nfs-server") }} + share: / + nfsVersion: "4.1" + mountOptions: + mountMode: hard + timeout: 60 + retransmissions: 3 + reclaimPolicy: Delete + volumeBindingMode: WaitForFirstConsumer +{{ end -}} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml new file mode 100644 index 0000000000..b225504381 --- 
/dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: qemu-guest-agent-install-ubuntu.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["ubuntu-lts", "debian", "astra"] + content: | + bb-apt-install qemu-guest-agent bash-completion + systemctl enable --now qemu-guest-agent +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: astra-d8-dm-modules.conf +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["astra"] + content: | + bb-sync-file /etc/modules-load.d/d8-dm-modules.conf - << "EOF" + dm_snapshot + dm_thin_pool + dm_cache + EOF + + systemctl restart systemd-modules-load.service +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: install-tools.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["*"] + content: | + bb-sync-file /etc/profile.d/01-kubectl-aliases.sh - << "EOF" + source <(/opt/deckhouse/bin/kubectl completion bash) + alias k=kubectl + complete -o default -F __start_kubectl k + EOF + + if [ ! -f /usr/local/bin/k9s ]; then + K9S_URL=$(curl -s https://api.github.com/repos/derailed/k9s/releases/latest | jq '.assets[] | select(.name=="k9s_Linux_amd64.tar.gz") | .browser_download_url' -r) + curl -L "${K9S_URL}" | tar -xz -C /usr/bin/ "k9s" + fi + + if [ ! -f /usr/local/bin/stern ]; then + STERN_URL=$(curl -s https://api.github.com/repos/stern/stern/releases/latest | jq '.assets[].browser_download_url | select(. | test("linux_amd64"))' -r) + curl -L "${STERN_URL}" | tar -xz -C /usr/bin/ "stern" + fi diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml new file mode 100644 index 0000000000..c446976548 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml @@ -0,0 +1,45 @@ +{{range $_, $v := .Values.instances.additionalNodes }} + {{ if (ne ($v.count |int) 0) }} +--- +apiVersion: deckhouse.io/v1 +kind: NodeGroup +metadata: + name: {{ $v.name }} +spec: + disruptions: + approvalMode: Manual + nodeTemplate: + labels: + node-role.deckhouse.io/{{ $v.name }}: "" + {{ if ne $v.name "worker" -}} + taints: + - effect: NoExecute + key: dedicated.deckhouse.io + value: {{ $v.name }} + {{- end }} + nodeType: Static + staticInstances: + count: {{ $v.count }} + labelSelector: + matchLabels: + role: {{ $v.name }} + {{ end }} + + + {{range $_, $i := untilStep 0 ($v.count | int) 1}} + {{ $vmName := printf "%s-%d" $v.name $i }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: StaticInstance +metadata: + name: {{ $vmName }} + labels: + role: {{ $v.name }} +spec: + address: {{ index $.Values.discovered.vmIPs $vmName }} + credentialsRef: + kind: SSHCredentials + name: mvp-static + {{- end }} + +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/ssh-creds.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/ssh-creds.yaml new file mode 100644 index 0000000000..cfb6d9cd69 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/ssh-creds.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: deckhouse.io/v1alpha2 +kind: SSHCredentials +metadata: + name: mvp-static +spec: + user: cloud + privateSSHKey: {{ .Values.discovered.privateSSHKeyBase64 }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml new file mode 100644 
index 0000000000..6d168d5788 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml @@ -0,0 +1,31 @@ +{{- if eq .Values.deckhouse.bundle "Default" }} +{{- if .Values.modules.virtualizationEnabled }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: virtualization +spec: + enabled: true + settings: + dvcr: + storage: + persistentVolumeClaim: + size: 10Gi + # storageClassName: linstor-thin-r1 + type: PersistentVolumeClaim + virtualMachineCIDRs: + - 192.168.10.0/24 + version: 1 +{{ if not .Values.cse }} +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: virtualization +spec: + imageTag: {{ .Values.virtualization.tag }} + scanInterval: 15s +{{ end -}} +{{ end -}} +{{ end -}} diff --git a/test/dvp-static-cluster/charts/infra/.helmignore b/test/dvp-static-cluster/charts/infra/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/test/dvp-static-cluster/charts/infra/Chart.yaml b/test/dvp-static-cluster/charts/infra/Chart.yaml new file mode 100644 index 0000000000..e0ab20a245 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: infra +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl b/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl new file mode 100644 index 0000000000..549c9de434 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl @@ -0,0 +1,123 @@ +{{- define "infra.vm-labels" -}} +{{- $prefix := regexReplaceAll "-\\d+$" . "" -}} +vm: {{ . }} +group: {{ $prefix }} +{{- end }} + +{{- define "infra.vmclass-name" -}} +{{ .Values.namespace }}-cpu +{{- end }} + +{{- define "infra.vd-root-name" -}} +{{ . }}-root +{{- end }} + +{{- define "infra.vm" -}} +{{- $ctx := index . 0 -}} +{{- $name := index . 1 -}} +{{- $cfg := index . 
2 -}} +--- +kind: Service +apiVersion: v1 +metadata: + name: {{ $name }} + namespace: {{ $ctx.Values.namespace }} + labels: + {{- include "infra.vm-labels" $name | nindent 4 }} +spec: + clusterIP: None + selector: + {{- include "infra.vm-labels" $name | nindent 4 }} +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachine +metadata: + name: {{ $name }} + namespace: {{ $ctx.Values.namespace }} + labels: + {{- include "infra.vm-labels" $name | nindent 4 }} +spec: + blockDeviceRefs: + - kind: VirtualDisk + name: {{ include "infra.vd-root-name" $name }} +{{- range $i, $v := $cfg.additionalDisks }} + - kind: VirtualDisk + name: {{ printf "%s-%d" $name $i }} +{{- end }} + bootloader: {{ $ctx.Values.image.bootloader }} + liveMigrationPolicy: PreferForced + cpu: + coreFraction: {{ $cfg.cpu.coreFraction }} + cores: {{ $cfg.cpu.cores }} + disruptions: + restartApprovalMode: Automatic + enableParavirtualization: true + memory: + size: {{ $cfg.memory.size }} + osType: Generic + provisioning: + type: UserData + userData: | + #cloud-config + ssh_pwauth: true + package_update: true + packages: + - tmux + - htop + - qemu-guest-agent + - iputils-ping + - jq + - rsync + - fio + - bind9-dnsutils + users: + - default + - name: cloud + passwd: $6$rounds=4096$vln/.aPHBOI7BMYR$bBMkqQvuGs5Gyd/1H5DP4m9HjQSy.kgrxpaGEHwkX7KEFV8BS.HZWPitAtZ2Vd8ZqIZRqmlykRCagTgPejt1i. + shell: /bin/bash + sudo: ALL=(ALL) NOPASSWD:ALL + chpasswd: {expire: False} + lock_passwd: false + ssh_authorized_keys: + - {{ $ctx.Values.discovered.publicSSHKey }} + + runcmd: + - systemctl enable --now qemu-guest-agent.service + final_message: "\U0001F525\U0001F525\U0001F525 The system is finally up, after $UPTIME seconds \U0001F525\U0001F525\U0001F525" + runPolicy: AlwaysOn + virtualMachineClassName: {{ include "infra.vmclass-name" $ctx }} +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualDisk +metadata: + name: {{ include "infra.vd-root-name" $name }} + namespace: {{ $ctx.Values.namespace }} + labels: + {{- include "infra.vm-labels" $name | nindent 4 }} +spec: + dataSource: + objectRef: + kind: VirtualImage + name: base-image + type: ObjectRef + persistentVolumeClaim: + size: {{ $cfg.rootDiskSize | default "50Gi" }} + {{- if $ctx.Values.storageClass }} + storageClassName: {{ $ctx.Values.storageClass }} + {{- end }} + + {{range $i, $v := $cfg.additionalDisks }} +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualDisk +metadata: + name: {{ printf "%s-%d" $name $i }} + namespace: {{ $ctx.Values.namespace }} +spec: + persistentVolumeClaim: + size: {{ $v.size }} + {{- if $ctx.Values.storageClass }} + storageClassName: {{ $ctx.Values.storageClass }} + {{- end }} + {{- end }} +{{- end }} diff --git a/test/dvp-static-cluster/charts/infra/templates/ingress.yaml b/test/dvp-static-cluster/charts/infra/templates/ingress.yaml new file mode 100644 index 0000000000..df81d5326d --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/ingress.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-80 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + group: master +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-443 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + group: master +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-https + namespace: {{ 
.Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +spec: + ingressClassName: nginx + rules: + {{- range .Values.ingressHosts }} + - host: "{{ . }}.{{ $.Values.namespace }}.{{ $.Values.discovered.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-443 + port: + number: 443 + {{- end }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-http + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - host: "*.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-80 + port: + number: 80 diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml new file mode 100644 index 0000000000..e76f76dbd0 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: jump-host + template: + metadata: + labels: + app: jump-host + spec: + containers: + - name: jump-host + image: registry-dvp.dev.flant.dev/tools/jump-host:v0.1.2 + imagePullPolicy: Always + resources: + limits: + cpu: "200m" + memory: "200Mi" + requests: + cpu: "200m" + memory: "200Mi" + ports: + - containerPort: 2222 + env: + - name: SSH_KEY + value: "{{ .Values.sshPublicKey }}" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml new file mode 100644 index 0000000000..12f1f11fb0 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml @@ -0,0 +1,40 @@ +{{- define "jump-host.ws-fqdn" -}} +"ws.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" +{{- end }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ws + namespace: {{ .Values.namespace }} +spec: + ingressClassName: nginx + rules: + - host: {{ include "jump-host.ws-fqdn" . }} + http: + paths: + - backend: + service: + name: jump-host + port: + number: 8080 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - {{ include "jump-host.ws-fqdn" . }} + secretName: ws-tls +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: ws-tls + namespace: {{ .Values.namespace }} +spec: + certificateOwnerRef: false + dnsNames: + - {{ include "jump-host.ws-fqdn" . 
}} + issuerRef: + kind: ClusterIssuer + name: letsencrypt + secretName: ws-tls diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml new file mode 100644 index 0000000000..4634ca7313 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + type: NodePort + selector: + app: jump-host + ports: + - name: ssh + protocol: TCP + port: 2222 + targetPort: 2222 +--- +apiVersion: v1 +kind: Service +metadata: + name: jump-host-ws + namespace: {{ .Values.namespace }} +spec: + selector: + app: jump-host + ports: + - name: ws + protocol: TCP + port: 8080 + targetPort: 8080 diff --git a/test/dvp-static-cluster/charts/infra/templates/ns.yaml b/test/dvp-static-cluster/charts/infra/templates/ns.yaml new file mode 100644 index 0000000000..77db5f9f65 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} diff --git a/test/dvp-static-cluster/charts/infra/templates/vi.yaml b/test/dvp-static-cluster/charts/infra/templates/vi.yaml new file mode 100644 index 0000000000..541d23cbc7 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/vi.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualImage +metadata: + name: base-image + namespace: {{ .Values.namespace }} +spec: + storage: ContainerRegistry + dataSource: + type: HTTP + http: + url: {{ .Values.image.url }} diff --git a/test/dvp-static-cluster/charts/infra/templates/vmc.yaml b/test/dvp-static-cluster/charts/infra/templates/vmc.yaml new file mode 100644 index 0000000000..c91d76f6c0 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/vmc.yaml @@ -0,0 +1,7 @@ +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachineClass +metadata: + name: {{ include "infra.vmclass-name" . }} +spec: + cpu: + type: Discovery diff --git a/test/dvp-static-cluster/charts/infra/templates/vms.yaml b/test/dvp-static-cluster/charts/infra/templates/vms.yaml new file mode 100644 index 0000000000..6da53b3ca5 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/vms.yaml @@ -0,0 +1,12 @@ + +{{range $_, $i := untilStep 0 (.Values.instances.masterNodes.count | int) 1}} + {{- $vmName := printf "master-%d" $i -}} + {{ include "infra.vm" (list $ $vmName $.Values.instances.masterNodes.cfg) | nindent 0 }} +{{- end }} + +{{range $_, $v := .Values.instances.additionalNodes }} + {{range $_, $i := untilStep 0 ($v.count | int) 1}} + {{- $vmName := printf "%s-%d" $v.name $i -}} + {{ include "infra.vm" (list $ $vmName $v.cfg) | nindent 0}} + {{- end }} +{{- end }} diff --git a/test/dvp-static-cluster/nested-sa-config/gen-sa.sh b/test/dvp-static-cluster/nested-sa-config/gen-sa.sh new file mode 100644 index 0000000000..02e01b5e55 --- /dev/null +++ b/test/dvp-static-cluster/nested-sa-config/gen-sa.sh @@ -0,0 +1,184 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +get_current_date() { + date +"%H:%M:%S %d-%m-%Y" +} + +get_timestamp() { + date +%s +} + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +log_info() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${BLUE}[INFO]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [INFO] $message" >> "$LOG_FILE" + fi +} + +log_success() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${GREEN}[SUCCESS]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [SUCCESS] $message" >> "$LOG_FILE" + fi +} + +log_warning() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${YELLOW}[WARNING]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [WARNING] $message" >> "$LOG_FILE" + fi +} + +log_error() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "${RED}[ERROR]${NC} $message" + if [ -n "$LOG_FILE" ]; then + echo "[$timestamp] [ERROR] $message" >> "$LOG_FILE" + fi +} + +exit_trap() { + echo "" + log_info "Exiting..." + echo "" + exit 0 +} + +kubectl() { + sudo /opt/deckhouse/bin/kubectl $@ +} + +trap exit_trap SIGINT SIGTERM + + +SA_NAME=$1 +CLUSTER_PREFIX=$2 +CLUSTER_NAME=$3 +FILE_NAME=$4 + +if [[ -z "$SA_NAME" ]] || [[ -z "$CLUSTER_PREFIX" ]] || [[ -z "$CLUSTER_NAME" ]]; then + log_error "Usage: gen-sa.sh [FILE_NAME]" + exit 1 +fi + +if [[ -z "$FILE_NAME" ]]; then + FILE_NAME=/tmp/kube.config +fi + +SA_TOKEN=virt-${CLUSTER_PREFIX}-${SA_NAME}-token +SA_CAR_NAME=virt-${CLUSTER_PREFIX}-${SA_NAME} + +USER_NAME=${SA_NAME} +CONTEXT_NAME=${CLUSTER_NAME}-${USER_NAME} + +if kubectl cluster-info > /dev/null 2>&1; then + log_success "Access to Kubernetes cluster exists." +else + log_error "No access to Kubernetes cluster or configuration issue." 
+ exit 1 +fi + +sleep 2 +log_info "====" +log_info "Kubeconfig will be created successfully if you connected to k8s cluster via ssh tunnel or directly" +log_info "====" +sleep 2 + + +log_info "Apply SA, Secrets and ClusterAuthorizationRule" +kubectl apply -f -< /etc/ceph/ceph.conf + [global] + mon_host = $(sed 's/[a-z]=//g' /etc/rook/mon-endpoints) + EOF + + cat << EOF > /etc/ceph/ceph.client.admin.keyring + [$ROOK_CEPH_USERNAME] + key = $ROOK_CEPH_SECRET + EOF + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + key: ceph-username + name: rook-ceph-mon + - name: ROOK_CEPH_SECRET + valueFrom: + secretKeyRef: + key: ceph-secret + name: rook-ceph-mon + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - mountPath: /etc/rook + name: mon-endpoint-volume + containers: + - name: ceph-tools + command: + - sleep + - infinity + image: quay.io/ceph/ceph:v18.2.2 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + tty: true + workingDir: /var/lib/ceph + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - mountPath: /var/lib/ceph + name: homedir + securityContext: + runAsGroup: 167 + runAsNonRoot: true + runAsUser: 167 + volumes: + - name: mon-endpoint-volume + configMap: + defaultMode: 420 + items: + - key: data + path: mon-endpoints + name: rook-ceph-mon-endpoints + - name: ceph-config + emptyDir: {} + - name: homedir + emptyDir: {} diff --git a/test/dvp-static-cluster/storage/ceph/ceph-configure.sh b/test/dvp-static-cluster/storage/ceph/ceph-configure.sh new file mode 100644 index 0000000000..aad18a1bf5 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/ceph-configure.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
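Before running the configuration script below, it is worth confirming that the toolbox Deployment defined above is up and that the Ceph cluster answers queries; a minimal sanity check, reusing the same namespace, deployment and container names the script itself relies on, might look like this:

#!/usr/bin/env bash
# Sanity check before ceph-configure.sh: toolbox reachable, cluster responding.
set -euo pipefail

# Wait for the rook-ceph-tools Deployment applied above to become available.
kubectl -n d8-operator-ceph rollout status deployment/rook-ceph-tools --timeout=300s

# Read-only Ceph CLI calls executed inside the toolbox container.
kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph -s
kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph osd pool ls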
+ +ceph_user_pool=ceph-rbd-pool-r2 +echo "Use user $ceph_user_pool" +echo "Set permissions for user $ceph_user_pool (mgr 'allow *' mon 'allow *' osd 'allow *' mds 'allow *')" +usr=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- \ + ceph auth get-or-create client.$ceph_user_pool mon 'allow *' mgr 'allow *' osd "allow *") +echo "Get fsid" +fsid=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph fsid) + +userKey="${usr#*key = }" +ceph_monitors_ip=$(kubectl -n d8-operator-ceph get svc | grep mon | awk '{print $3}') +monitors_yaml=$( + for monitor_ip in $ceph_monitors_ip; do + echo " - $monitor_ip:6789" + done +) + +# Verify we have monitors +if [ -z "$monitors_yaml" ]; then + echo "ERROR: No Ceph monitors found" + exit 1 +fi + +echo "Create CephClusterConnection" +kubectl apply -f - <> "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: LVMVolumeGroup +metadata: + name: vg-data-${node_name}-${dev_path} +spec: + actualVGNameOnTheNode: vg-thin-data + type: Local + local: + nodeName: ${dev_node} + blockDeviceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - ${dev_name} + thinPools: + - name: thin-data + size: ${LVMVG_SIZE} + allocationLimit: 100% +EOF + +done + +kubectl apply -f "${manifest}" diff --git a/test/dvp-static-cluster/storage/sds-replicated/mc.yaml b/test/dvp-static-cluster/storage/sds-replicated/mc.yaml new file mode 100644 index 0000000000..b7d6abda99 --- /dev/null +++ b/test/dvp-static-cluster/storage/sds-replicated/mc.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-node-configurator +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-replicated-volume +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-node-configurator +spec: + imageTag: main + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-replicated-volume +spec: + imageTag: main + scanInterval: 15s diff --git a/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh b/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh new file mode 100644 index 0000000000..7d93443620 --- /dev/null +++ b/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
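The rsc-gen.sh body below folds every discovered LVMVolumeGroup into a single ReplicatedStoragePool, so an empty or partial list quietly produces a broken pool definition. A small pre-flight check, sketched here with the same jq path the script uses and assuming the LVMVolumeGroup objects from the generation step above have already been applied, makes that failure mode visible early:

#!/usr/bin/env bash
# Pre-flight for rsc-gen.sh: list the LVMVolumeGroups that will feed the pool.
set -euo pipefail

pools=$(kubectl get lvmvolumegroup -o json \
  | jq -rc '.items[] | {name: .metadata.name, thinPoolName: .spec.thinPools[0].name}')

if [ -z "$pools" ]; then
  echo "No LVMVolumeGroup resources found; apply the LVG manifests first" >&2
  exit 1
fi

echo "LVMVolumeGroups that will be added to the ReplicatedStoragePool:"
echo "$pools"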
+ +manifest=sds-rsp-rsc.yaml +replicatedStoragePoolName=thin-data + +pools=$(kubectl get lvmvolumegroup -o json | jq '.items[] | {name: .metadata.name, thinPoolName: .spec.thinPools[0].name}' -rc) + +cat << EOF > "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStoragePool +metadata: + name: $replicatedStoragePoolName +spec: + type: LVMThin + lvmVolumeGroups: +EOF + +for pool in ${pools}; do + vg_name=$(echo $pool | jq -r '.name'); + pool_node=$(echo $pool | jq -r '.thinPoolName'); + echo "${pool_node} ${vg_name}" +cat << EOF >> "${manifest}" + - name: ${vg_name} + thinPoolName: ${pool_node} +EOF +done + +cat << EOF >> "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r2 +spec: + replication: Availability + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: PreferablyLocal + topology: Ignored +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r1 +spec: + replication: None + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: PreferablyLocal + topology: Ignored +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r1-immediate +spec: + replication: None + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: Any + topology: Ignored +EOF + +kubectl apply -f ${manifest} + +DEFAULT_STORAGE_CLASS=nested-thin-r1 +kubectl patch mc global --type='json' -p='[{"op": "replace", "path": "/spec/settings/defaultClusterStorageClass", "value": "'"$DEFAULT_STORAGE_CLASS"'"}]' + +sleep 2 +echo "Showing Storage Classes" +kubectl get storageclass +echo " " From c166ee17c16323cb02d9dadcfdc718101aa3f597 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Tue, 16 Dec 2025 16:13:41 +0300 Subject: [PATCH 07/71] test bootstrap with proxy settings in nested cluster Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 2 ++ .github/workflows/e2e-reusable-pipeline.yml | 30 ++++++++++++++++++- .../templates/cluster-config.yaml | 13 ++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index c1134dd952..fb86317f67 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -48,6 +48,7 @@ jobs: DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + BOOTSTRAP_DEV_PROXY: ${{ secrets.BOOTSTRAP_DEV_PROXY }} e2e-replicated: name: E2E Pipeline (Replicated) @@ -66,6 +67,7 @@ jobs: DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + BOOTSTRAP_DEV_PROXY: ${{ secrets.BOOTSTRAP_DEV_PROXY }} report-to-channel: diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 3ca5528e07..50679b8c41 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -66,6 +66,8 @@ on: required: true PROD_IO_REGISTRY_DOCKER_CFG: required: true + BOOTSTRAP_DEV_PROXY: + required: true outputs: e2e-summary: description: "E2E test results" @@ -161,8 +163,10 @@ jobs: tag: ${{ env.DECKHOUSE_TAG }} kubernetesVersion: Automatic 
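    # registryDockerCfg and httpProxy below are filled from repository secrets
    # (DEV_REGISTRY_DOCKER_CFG and BOOTSTRAP_DEV_PROXY); the cluster-config chart
    # renders httpProxy into ClusterConfiguration.proxy as both httpProxy and
    # httpsProxy so the nested cluster can bootstrap behind the egress proxy.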
registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + httpProxy: ${{ secrets.BOOTSTRAP_DEV_PROXY }} image: - url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img + url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img + # url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img defaultUser: ${{ env.DEFAULT_USER }} bootloader: BIOS ingressHosts: @@ -489,6 +493,30 @@ jobs: exit 1 fi + echo "Wait pods and webhooks sds-replicated pods" + for i in {1..60}; do + echo "Check sds-replicated pods, linstor-node csi-node webhooks" + linstor_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep linstor-node | grep -c Running || echo 0) + csi_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep csi-node | grep -c Running || echo 0) + webhooks=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep webhooks | grep -c Running || echo 0) + + echo "check if sds-replicated pods are ready" + if [[ "${linstor_node}" -ge "${workers}" ]] && [[ "${csi_node}" -ge "${workers}" ]] && [[ "${webhooks}" -ge "1" ]]; then + echo "sds-replicated-volume is ready" + break + fi + + echo "Not all pods are ready, " + echo "Waiting 10s for sds-replicated-volume to be ready" + if (( i % 5 == 0 )); then + echo "= Get pods =" + kubectl -n d8-sds-replicated-volume get pods || true + echo "Show queue" + d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + echo "=====" + fi + done + chmod +x lvg-gen.sh ./lvg-gen.sh diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml index fa7fd15e14..19f5e9c944 100644 --- a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml +++ b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml @@ -8,6 +8,19 @@ podSubnetCIDR: 10.112.0.0/16 serviceSubnetCIDR: 10.223.0.0/16 kubernetesVersion: "{{ .Values.deckhouse.kubernetesVersion }}" clusterDomain: "internal.cluster.local" +proxy: + httpProxy: "{{ .Values.deckhouse.httpProxy }}" + httpsProxy: "{{ .Values.deckhouse.httpProxy }}" + noProxy: + - "localhost" + - "127.0.0.1" + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + - "10.112.0.0/16" + - "10.223.0.0/16" + - docker.io + - ".ubuntu.com" --- apiVersion: deckhouse.io/v1 kind: InitConfiguration From cd0ad62550eab59eca8af366cf54175d9c739490 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Tue, 16 Dec 2025 19:05:47 +0300 Subject: [PATCH 08/71] fix Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 50679b8c41..e080d9b247 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -506,7 +506,6 @@ jobs: break fi - echo "Not all pods are ready, " echo "Waiting 10s for sds-replicated-volume to be ready" if (( i % 5 == 0 )); then echo "= Get pods =" From 11a494727d3cdda93cda05e9e7bec219ddab68ff Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 17 Dec 2025 17:45:35 +0300 Subject: [PATCH 09/71] static: test deploy Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 117 +++++++++----- test/dvp-static-cluster/Taskfile.yaml | 113 +++++++------- .../templates/cluster-config.yaml | 17 +- .../templates/disabled-modules.yaml | 2 +- 
.../templates/master-nodes.yaml | 2 +- .../cluster-config/templates/modules-cse.yaml | 18 --- .../templates/modules-dvp-base.yaml | 54 +------ .../templates/modules-minimal.yaml | 10 +- .../cluster-config/templates/nodes.yaml | 9 +- .../templates/virtualization.yaml | 2 + .../charts/infra/templates/_helpers.tpl | 4 - .../charts/infra/templates/ingress.yaml | 4 +- .../infra/templates/jump-host/deploy.yaml | 2 +- .../charts/infra/templates/vms.yaml | 8 +- .../tools/deckhouse-queue.sh | 145 ++++++++++++++++++ 15 files changed, 312 insertions(+), 195 deletions(-) delete mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml create mode 100644 test/dvp-static-cluster/tools/deckhouse-queue.sh diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index e080d9b247..bdaf8e1069 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -78,8 +78,11 @@ env: BRANCH: ${{ inputs.branch }} VIRTUALIZATION_TAG: ${{ inputs.virtualization_tag }} DECKHOUSE_TAG: ${{ inputs.deckhouse_tag }} - DEFAULT_USER: ${{ inputs.default_user }} + # DEFAULT_USER: ${{ inputs.default_user }} + DEFAULT_USER: cloud GO_VERSION: ${{ inputs.go_version }} + # CLUSTER_TYPE_PATH: ${{ env.CLUSTER_TYPE_PATH }} + CLUSTER_TYPE_PATH: test/dvp-static-cluster defaults: run: @@ -147,12 +150,14 @@ jobs: kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} - name: Generate values.yaml + working-directory: ${{ env.CLUSTER_TYPE_PATH }} run: | defaultStorageClass=$(kubectl get storageclass -o json \ | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class" == "true") | .metadata.name') - cat < test/dvp-over-dvp/values.yaml + cat < values.yaml namespace: ${{ steps.vars.outputs.namespace }} + storage_type: ${{ inputs.storage_type }} storageClass: ${defaultStorageClass} nfsEnabled: false nfsSC: nested-nfs-${{ inputs.storage_type }}-${{ steps.vars.outputs.sha_short }} @@ -164,11 +169,15 @@ jobs: kubernetesVersion: Automatic registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} httpProxy: ${{ secrets.BOOTSTRAP_DEV_PROXY }} + bundle: Default image: url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img # url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img defaultUser: ${{ env.DEFAULT_USER }} bootloader: BIOS + modules: + virtualizationEnabled: false + sdsReplicatedVolumeEnabled: false ingressHosts: - api - grafana @@ -179,31 +188,51 @@ jobs: instances: masterNodes: count: 1 - cores: 8 - coreFraction: 50% - memory: 14Gi + cfg: + rootDiskSize: 60Gi + cpu: + cores: 4 + coreFraction: 50% + memory: + size: 12Gi additionalNodes: - - name: worker - count: 3 - cores: 10 - coreFraction: 25% - memory: 8Gi - nodeType: CloudEphemeral - bootloader: BIOS + - name: worker + count: 3 + cfg: + cpu: + cores: 4 + coreFraction: 50% + memory: + size: 6Gi + additionalDisks: + - size: 50Gi EOF + # instances: + # masterNodes: + # count: 1 + # cores: 8 + # coreFraction: 50% + # memory: 14Gi + # additionalNodes: + # - name: worker + # count: 3 + # cores: 10 + # coreFraction: 25% + # memory: 8Gi + # bootloader: BIOS - name: Bootstrap cluster [infra-deploy] - working-directory: test/dvp-over-dvp + working-directory: ${{ env.CLUSTER_TYPE_PATH }} run: | task infra-deploy - name: Bootstrap cluster [dhctl-bootstrap] id: dhctl-bootstrap - working-directory: test/dvp-over-dvp + working-directory: ${{ env.CLUSTER_TYPE_PATH }} run: | task 
dhctl-bootstrap timeout-minutes: 30 - name: Bootstrap cluster [show-connection-info] - working-directory: test/dvp-over-dvp + working-directory: ${{ env.CLUSTER_TYPE_PATH }} run: | task show-connection-info @@ -212,14 +241,16 @@ jobs: NAMESPACE: ${{ steps.vars.outputs.namespace }} if: always() run: | - kubectl -n $NAMESPACE create secret generic ssh-key --from-file=test/dvp-over-dvp/tmp/ssh/cloud + kubectl -n $NAMESPACE create secret generic ssh-key --from-file=${{ env.CLUSTER_TYPE_PATH }}/tmp/ssh/cloud - name: Get info about nested master VM - working-directory: test/dvp-over-dvp + working-directory: ${{ env.CLUSTER_TYPE_PATH }} env: NAMESPACE: ${{ steps.vars.outputs.namespace }} + PREFIX: ${{ inputs.storage_type }} run: | - nested_master=$(kubectl -n ${NAMESPACE} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + # nested_master=$(kubectl -n ${NAMESPACE} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") echo "Pods" kubectl get pods -n "${NAMESPACE}" @@ -253,12 +284,15 @@ jobs: - name: Generate nested kubeconfig id: generate-kubeconfig - working-directory: test/dvp-over-dvp + working-directory: ${{ env.CLUSTER_TYPE_PATH }} env: kubeConfigPath: tmp/kube.config NAMESPACE: ${{ steps.vars.outputs.namespace }} + PREFIX: ${{ inputs.storage_type }} run: | - nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + # nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + + nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") d8vscp() { local source=$1 @@ -345,8 +379,10 @@ jobs: if: steps.dhctl-bootstrap.outcome == 'failure' env: NAMESPACE: ${{ steps.vars.outputs.namespace }} + PREFIX: ${{ inputs.storage_type }} run: | - nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + # nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") d8vscp() { local source=$1 @@ -359,15 +395,15 @@ jobs: echo "d8vscp: $source -> $dest - done" } - d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/var/log/cloud-init*.log" "./test/dvp-over-dvp/tmp/" + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/var/log/cloud-init*.log" "./${{ env.CLUSTER_TYPE_PATH }}/tmp/" - name: Prepare artifact if: always() run: | - sudo chown -fR 1001:1001 test/dvp-over-dvp - yq e '.deckhouse.registryDockerCfg = "None"' -i ./test/dvp-over-dvp/values.yaml - yq e 'select(.kind == "InitConfiguration") .deckhouse.registryDockerCfg = "None"' -i ./test/dvp-over-dvp/tmp/config.yaml - echo "${{ steps.generate-kubeconfig.outputs.config }}" | base64 -d | base64 -d > ./test/dvp-over-dvp/kube-config + sudo chown -fR 1001:1001 ${{ env.CLUSTER_TYPE_PATH }} + yq e '.deckhouse.registryDockerCfg = "None"' -i ./${{ env.CLUSTER_TYPE_PATH }}/values.yaml + yq e 'select(.kind == "InitConfiguration") .deckhouse.registryDockerCfg = "None"' -i ./${{ env.CLUSTER_TYPE_PATH }}/tmp/config.yaml + echo "${{ steps.generate-kubeconfig.outputs.config }}" | base64 -d | base64 -d > ./${{ env.CLUSTER_TYPE_PATH }}/kube-config - name: Upload 
generated files uses: actions/upload-artifact@v4 @@ -376,8 +412,8 @@ jobs: with: name: generated-files-${{ inputs.storage_type }} path: | - test/dvp-over-dvp/tmp - test/dvp-over-dvp/values.yaml + ${{ env.CLUSTER_TYPE_PATH }}/tmp + ${{ env.CLUSTER_TYPE_PATH }}/values.yaml overwrite: true include-hidden-files: true retention-days: 1 @@ -388,7 +424,7 @@ jobs: if: always() with: name: generated-files-ssh-${{ inputs.storage_type }} - path: test/dvp-over-dvp/tmp/ssh + path: ${{ env.CLUSTER_TYPE_PATH }}/tmp/ssh overwrite: true include-hidden-files: true retention-days: 1 @@ -399,7 +435,7 @@ jobs: if: always() with: name: generated-files-kubeconfig-${{ inputs.storage_type }} - path: test/dvp-over-dvp/kube-config + path: ${{ env.CLUSTER_TYPE_PATH }}/kube-config overwrite: true include-hidden-files: true retention-days: 1 @@ -434,11 +470,22 @@ jobs: echo "kubectl get nodes" kubectl config use-context nested-e2e-nested-sa - kubectl get nodes + # kubectl get nodes + + for i in {1..3}; do + echo "Attempt $i/3..." + if (kubectl get nodes); then + echo "Successfully retrieved nodes." + break + else + echo "Retrying in 5 seconds..." + sleep 5 + fi + done - name: Configure replicated storage if: ${{ inputs.storage_type == 'replicated' }} - working-directory: test/dvp-over-dvp/storage/sds-replicated + working-directory: ${{ env.CLUSTER_TYPE_PATH }}/storage/sds-replicated run: | kubectl apply -f mc.yaml echo "Wait for sds-node-configurator" @@ -557,7 +604,7 @@ jobs: done } - cd test/dvp-over-dvp/storage/ceph + cd ${{ env.CLUSTER_TYPE_PATH }}/storage/ceph export registry=${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} yq e '.spec.registry.dockerCfg = env(registry)' -i 00-ms.yaml unset registry @@ -921,7 +968,7 @@ jobs: uses: actions/download-artifact@v5 with: name: generated-files-${{ inputs.storage_type }} - path: test/dvp-over-dvp/ + path: ${{ env.CLUSTER_TYPE_PATH }}/ - name: Configure kubectl via azure/k8s-set-context@v4 uses: azure/k8s-set-context@v4 @@ -931,6 +978,6 @@ jobs: kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} - name: infra-undeploy - working-directory: test/dvp-over-dvp + working-directory: ${{ env.CLUSTER_TYPE_PATH }} run: | task infra-undeploy diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml index c73e1e3a70..8995cca2bf 100644 --- a/test/dvp-static-cluster/Taskfile.yaml +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -3,15 +3,13 @@ version: "3" -includes: - vm: - taskfile: Taskfile.vm.yaml - vars: NAMESPACE: sh: yq eval '.namespace' values.yaml - D8_TAG: + DECKHOUSE_TAG: sh: yq eval '.deckhouse.tag' values.yaml + DEFAULT_USER: + sh: yq eval '.image.defaultUser' values.yaml TMP_DIR: ./tmp SSH_DIR: "{{ .TMP_DIR }}/ssh" SSH_FILE_NAME: cloud @@ -25,14 +23,17 @@ tasks: desc: Preflight / Create tmp dir cmds: - mkdir -p "{{ .TMP_DIR }}" + - mkdir -p "{{ .SSH_DIR }}" + status: + - test -d "{{ .TMP_DIR }}" + - test -d "{{ .SSH_DIR }}" ssh-gen: desc: Preflight / Generate ssh keypair for jump-host deps: - - create-tmp-dir + - create-tmp-dir cmds: - - mkdir -p "{{ .SSH_DIR }}" - - yes | ssh-keygen -t ed25519 -b 1024 -f {{ .SSH_PRIV_KEY_FILE }} -N "" -C "cloud" -v + - ssh-keygen -t ed25519 -b 1024 -f {{ .SSH_PRIV_KEY_FILE }} -N "" -C "cloud" - chmod 0600 "{{ .SSH_PUB_KEY_FILE }}" - chmod 0400 "{{ .SSH_PRIV_KEY_FILE }}" status: @@ -40,6 +41,8 @@ tasks: password-gen: desc: Preflight / Generate password + deps: + - ssh-gen cmds: - date +%s | sha256sum | base64 | head -c 10 > {{ .PASSWORD_FILE }} - | @@ -50,25 +53,30 @@ tasks: generate-helm-values: - desc: Generate 
helm values + desc: Generate helm values {{ .DISCOVERED_VALUES_FILE }} deps: - - ssh-gen - password-gen - - create-tmp-dir cmds: - touch {{ .DISCOVERED_VALUES_FILE }} - | - export SSH_PUB_KEY="$(cat {{ .SSH_PUB_KEY_FILE }})" - yq eval --inplace '.discovered.publicSSHKey = env(SSH_PUB_KEY)' {{ .DISCOVERED_VALUES_FILE }} + export SSH_PUB_KEY="$(cat {{ .SSH_PUB_KEY_FILE }})" + yq eval --inplace '.discovered.publicSSHKey = env(SSH_PUB_KEY)' {{ .DISCOVERED_VALUES_FILE }} + - | + export SSH_PRIV_KEY_B64="$(cat {{ .SSH_PRIV_KEY_FILE }} | base64 -w 0)" + yq eval --inplace '.discovered.privateSSHKeyBase64 = env(SSH_PRIV_KEY_B64)' {{ .DISCOVERED_VALUES_FILE }} - | - export SSH_PRIV_KEY_B64="$(cat {{ .SSH_PRIV_KEY_FILE }} | base64 -w 0)" - yq eval --inplace '.discovered.privateSSHKeyBase64 = env(SSH_PRIV_KEY_B64)' {{ .DISCOVERED_VALUES_FILE }} + export DOMAIN=$(kubectl get mc global -o json | jq '.spec.settings.modules.publicDomainTemplate | split(".")[1:] | join(".")' -rc) + yq eval --inplace '.discovered.domain = env(DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} - | - export DOMAIN=$(kubectl get mc global -o json | jq '.spec.settings.modules.publicDomainTemplate | split(".")[1:] | join(".")' -rc) - yq eval --inplace '.discovered.domain = env(DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} + export CLUSTER_DOMAIN=$(kubectl -n d8-system exec svc/deckhouse-leader -- deckhouse-controller global values -o json | jq -rc .clusterConfiguration.clusterDomain) + yq eval --inplace '.discovered.clusterDomain = env(CLUSTER_DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} - | - export CLUSTER_DOMAIN=$(kubectl -n d8-system exec -it svc/deckhouse-leader -- deckhouse-controller global values -o json | jq -rc .clusterConfiguration.clusterDomain) - yq eval --inplace '.discovered.clusterDomain = env(CLUSTER_DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} + export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" + yq eval --inplace '.discovered.passwordHash = env(PASSWORD_HASH)' {{ .DISCOVERED_VALUES_FILE }} + + render-vm-ips: + desc: Get VM IPs + cmds: - | if kubectl -n {{ .NAMESPACE }} get vm -o name 2>/dev/null | grep -q .; then export VM_IPS=$(kubectl -n {{ .NAMESPACE }} get vm -o json | jq -r '[.items[] | select(.status.ipAddress != null) | .metadata.name + ": " + .status.ipAddress] | join("\n")') @@ -76,17 +84,13 @@ tasks: else yq eval --inplace '.discovered.vmIPs = {}' {{ .DISCOVERED_VALUES_FILE }} fi - - | - export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" - yq eval --inplace '.discovered.passwordHash = env(PASSWORD_HASH)' {{ .DISCOVERED_VALUES_FILE }} render-infra: desc: Preparation / Generate infra manifests deps: - - ssh-gen - generate-helm-values cmds: - - helm template static-dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} > {{ .TMP_DIR }}/infra.yaml + - helm template static-dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} >> {{ .TMP_DIR }}/infra.yaml infra-deploy: deps: @@ -97,6 +101,7 @@ tasks: sh: date +%s cmds: - kubectl apply -f {{ .TMP_DIR }}/infra.yaml + - kubectl -n {{ .NAMESPACE }} get all - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=jump-host --timeout=300s - kubectl -n {{ .NAMESPACE }} get vi -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Ready' --timeout=600s - kubectl -n {{ .NAMESPACE }} get vd -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Ready' --timeout=600s @@ -106,12 +111,12 @@ tasks: export end_time=$(date +%s) difference=$((end_time - {{.start_time}})) date 
-ud "@$difference" +'%H:%M:%S' + - task: render-vm-ips infra-undeploy: desc: Destroy infra aliases: - uninstall - prompt: This command will destroy current infra... Do you want to continue? cmds: - kubectl delete -f {{ .TMP_DIR }}/infra.yaml || true - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true @@ -139,28 +144,24 @@ tasks: - task render-cluster-config - task render-cluster-manifests - update-cluster: - desc: Update cluster - deps: - - render-cluster-manifests - cmds: - - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" {{ .TMP_DIR }}/config-manifests.yaml cloud@master-0:/tmp/config-manifests.yaml - - task: __ssh-command - vars: - CMD: sudo /opt/deckhouse/bin/kubectl apply -f /tmp/config-manifests.yaml + # update-cluster: + # desc: Update cluster + # deps: + # - render-cluster-manifests + # cmds: + # - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" {{ .TMP_DIR }}/config-manifests.yaml {{ .DEFAULT_USER }}@master-0:/tmp/config-manifests.yaml + # - task: __ssh-command + # vars: + # CMD: sudo /opt/deckhouse/bin/kubectl apply -f /tmp/config-manifests.yaml dhctl-bootstrap: desc: Bootstrap DKP over DVP deps: - render-cluster-config vars: - DeckhouseInstallImage: - sh: | - if $(yq eval '.cse' values.yaml); then - echo "dev-registry-cse.deckhouse.ru/sys/deckhouse-cse/install" - else - echo "dev-registry.deckhouse.io/sys/deckhouse-oss/install" - fi + DeckhouseInstallImage: "dev-registry.deckhouse.io/sys/deckhouse-oss/install" + prefix: + sh: yq eval '.storage_type' values.yaml start_time: sh: date +%s JUMPHOST_EXT_IP: @@ -168,18 +169,19 @@ tasks: JUMPHOST_NODEPORT: sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' MASTER_NODE_IP: - sh: kubectl -n {{ .NAMESPACE }} get vm master-0 -o jsonpath="{.status.ipAddress}" + sh: kubectl -n {{ .NAMESPACE }} get vm {{.prefix}}-master-0 -o jsonpath="{.status.ipAddress}" cmds: - | - docker run --pull=always -it \ + docker run --pull=always \ -v "{{ .TMP_DIR }}/config.yaml:/config.yaml" \ -v "{{ .SSH_DIR }}:/tmp/.ssh/" \ - {{ .DeckhouseInstallImage }}:{{ .D8_TAG }} \ + -v "{{ .TMP_DIR }}/dhctl:/tmp/dhctl/" \ + {{ .DeckhouseInstallImage }}:{{ .DECKHOUSE_TAG }} \ dhctl bootstrap \ --config=/config.yaml \ --ssh-agent-private-keys=/tmp/.ssh/{{ .SSH_FILE_NAME }} \ --ssh-host={{ .MASTER_NODE_IP }} \ - --ssh-user=cloud \ + --ssh-user={{ .DEFAULT_USER }} \ --ssh-bastion-port={{ .JUMPHOST_NODEPORT }} \ --ssh-bastion-host={{ .JUMPHOST_EXT_IP }} \ --ssh-bastion-user=user \ @@ -208,13 +210,8 @@ tasks: - echo "Connect to master task ssh-to-master" - | echo "Host cluster master node: {{ .MASTER_NODE_NAME }}" - echo "Host cluster grafana URL: https://grafana.{{ .DOMAIN }}" echo "Namespace: {{ .NAMESPACE }}" - echo "ssh-pub key:" - cat {{ .SSH_PUB_KEY_FILE }} - echo "ssh-priv key:" - cat {{ .SSH_PRIV_KEY_FILE }} - echo "OS User: cloud" + echo "OS User: {{ .DEFAULT_USER }}" echo "Bastion: user@{{ .JUMPHOST_EXT_IP }}:{{ .JUMPHOST_NODEPORT }}" echo vms: kubectl -n {{ .NAMESPACE }} get vm @@ -234,11 +231,11 @@ tasks: ssh-to-master: cmds: - - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' 
cloud@master-0 + - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@master-0 ssh-to-worker: cmds: - - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true worker-0.{{ .NAMESPACE }} 22' cloud@worker-0 + - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true worker-0.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@worker-0 ssh-to-master-via-jumphost: vars: @@ -268,7 +265,7 @@ tasks: -J user@{{ .JUMPHOST_EXT_IP }}:{{ .JUMPHOST_NODEPORT }} \ -o StrictHostKeyChecking=no \ -o UserKnownHostsFile=/dev/null \ - cloud@{{ .MASTER_NODE_IP}} + {{ .DEFAULT_USER }}@{{ .MASTER_NODE_IP}} ssh-to-master-via-ws: vars: @@ -302,7 +299,7 @@ tasks: -J user@127.0.0.1:9999 \ -o StrictHostKeyChecking=no \ -o UserKnownHostsFile=/dev/null \ - cloud@{{ .MASTER_NODE_IP}} + {{ .DEFAULT_USER }}@{{ .MASTER_NODE_IP}} clean: cmds: @@ -313,7 +310,7 @@ tasks: silent: true internal: true cmds: - - /usr/bin/ssh -t -i {{ .SSH_PRIV_KEY_FILE }} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' cloud@master-0 {{ .CMD }} + - /usr/bin/ssh -t -i {{ .SSH_PRIV_KEY_FILE }} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@master-0 {{ .CMD }} kubectl: desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" @@ -335,7 +332,7 @@ tasks: script: gen-lvg.sh config: /tmp/sds-local-lvg.yaml cmds: - - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} cloud@master-0:/tmp/ + - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} {{ .DEFAULT_USER }}@master-0:/tmp/ - task: __ssh-command vars: CMD: sudo chmod +x /tmp/{{ .script }} @@ -352,7 +349,7 @@ tasks: script: gen-sc.sh config: /tmp/sds-local-sc cmds: - - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} cloud@master-0:/tmp/ + - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} {{ .DEFAULT_USER }}@master-0:/tmp/ - task: __ssh-command vars: CMD: sudo chmod +x /tmp/{{ .script }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml index e3587ba75d..d0c490ad6f 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml @@ -8,15 +8,24 @@ serviceSubnetCIDR: 10.99.0.0/16 kubernetesVersion: {{ .Values.deckhouse.kubernetesVersion 
| quote }} clusterDomain: "internal.{{ .Values.discovered.clusterDomain }}" defaultCRI: ContainerdV2 +proxy: + httpProxy: "{{ .Values.deckhouse.httpProxy }}" + httpsProxy: "{{ .Values.deckhouse.httpProxy }}" + noProxy: + - "localhost" + - "127.0.0.1" + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + - "10.112.0.0/16" + - "10.223.0.0/16" + - docker.io + - ".ubuntu.com" --- apiVersion: deckhouse.io/v1 kind: InitConfiguration deckhouse: - {{- if .Values.cse }} - imagesRepo: dev-registry-cse.deckhouse.ru/sys/deckhouse-cse - {{- else }} imagesRepo: dev-registry.deckhouse.io/sys/deckhouse-oss - {{- end }} registryDockerCfg: {{ .Values.deckhouse.registryDockerCfg }} devBranch: {{ .Values.deckhouse.tag }} --- diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml index 0162e92748..d4e82ce103 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml @@ -1,4 +1,4 @@ -{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" "dashboard" -}} +{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" "dashboard" "console" -}} {{- range $modules }} --- diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml index 5c2b0b1ee9..0f41bc3524 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml @@ -30,7 +30,7 @@ spec: role: master {{range $_, $i := untilStep 0 (.Values.instances.masterNodes.count | int) 1}} - {{ $vmName := printf "master-%d" $i }} + {{ $vmName := printf "%s-master-%d" $.Values.storage_type $i }} --- apiVersion: deckhouse.io/v1alpha1 kind: StaticInstance diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml deleted file mode 100644 index 0e2be95fba..0000000000 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-cse.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{ $enabled := false }} -{{- if .Values.cse }} -{{- $enabled = true }} -{{- end }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: multitenancy-manager -spec: - enabled: {{ $enabled }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: gost-integrity-controller -spec: - enabled: {{ $enabled }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml index d1bd57d44b..186a955850 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml @@ -28,8 +28,6 @@ metadata: name: user-authz spec: enabled: true - settings: - enableMultiTenancy: true version: 1 --- apiVersion: deckhouse.io/v1alpha1 @@ -61,7 +59,6 @@ metadata: spec: version: 1 enabled: true -{{- if not .Values.cse }} --- apiVersion: deckhouse.io/v1alpha2 kind: ModulePullOverride @@ -70,7 +67,6 @@ metadata: spec: imageTag: stable scanInterval: 15s -{{- end }} --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig @@ -118,7 
+114,6 @@ metadata: spec: enabled: true version: 1 -{{- if not .Values.cse }} --- apiVersion: deckhouse.io/v1alpha2 kind: ModulePullOverride @@ -128,7 +123,6 @@ spec: imageTag: main rollback: false scanInterval: 10m0s -{{- end }} {{ if or .Values.modules.sdsLocalVolumeEnabled .Values.modules.sdsReplicatedVolumeEnabled }} --- @@ -139,7 +133,6 @@ metadata: spec: version: 1 enabled: true -{{- if not .Values.cse }} --- apiVersion: deckhouse.io/v1alpha2 kind: ModulePullOverride @@ -148,28 +141,6 @@ metadata: spec: imageTag: main scanInterval: 15s -{{- end }} -{{ end }} - -{{ if .Values.modules.sdsLocalVolumeEnabled }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: sds-local-volume -spec: - version: 1 - enabled: true -{{- if not .Values.cse }} ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: sds-local-volume -spec: - imageTag: main - scanInterval: 15s -{{- end }} {{ end }} {{ if .Values.modules.sdsReplicatedVolumeEnabled }} @@ -181,7 +152,6 @@ metadata: spec: version: 1 enabled: true -{{- if not .Values.cse }} --- apiVersion: deckhouse.io/v1alpha2 kind: ModulePullOverride @@ -190,7 +160,6 @@ metadata: spec: imageTag: main scanInterval: 15s -{{- end }} {{ end }} ################################################################## @@ -237,25 +206,4 @@ spec: email: admin@deckhouse.io # echo "t3chn0l0gi4" | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 password: {{ .Values.discovered.passwordHash }} - -################################################################## -## console -################################################################## ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: console -spec: - enabled: true -{{- if not .Values.cse }} ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: console -spec: - imageTag: master - scanInterval: 15s -{{- end }} -{{ end -}} +{{ end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml index 20410d13f1..46e5f71d8e 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml @@ -21,7 +21,7 @@ spec: settings: modules: publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" - defaultClusterStorageClass: nfs + # defaultClusterStorageClass: nfs --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig @@ -63,12 +63,4 @@ metadata: name: control-plane-manager spec: enabled: true -{{- if .Values.cse }} - settings: - apiserver: - auditPolicyEnabled: true - signature: Enforce - etcd: - maxDbSize: 6442450944 -{{- end }} version: 2 diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml index c446976548..ad6845ad80 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml @@ -1,4 +1,4 @@ -{{range $_, $v := .Values.instances.additionalNodes }} +{{- range $_, $v := .Values.instances.additionalNodes }} {{ if (ne ($v.count |int) 0) }} --- apiVersion: deckhouse.io/v1 @@ -23,11 +23,10 @@ spec: labelSelector: matchLabels: role: {{ $v.name }} - {{ end }} - + {{- end }} - {{range $_, $i := untilStep 0 ($v.count | int) 1}} - {{ $vmName := printf "%s-%d" $v.name $i }} + {{- range $_, $i := untilStep 0 
($v.count | int) 1}} + {{- $vmName := printf "%s-%s-%d" $.Values.storage_type $v.name $i }} --- apiVersion: deckhouse.io/v1alpha1 kind: StaticInstance diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml index 6d168d5788..fb63162c53 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml @@ -1,3 +1,4 @@ +{{/* {{- if eq .Values.deckhouse.bundle "Default" }} {{- if .Values.modules.virtualizationEnabled }} --- @@ -29,3 +30,4 @@ spec: {{ end -}} {{ end -}} {{ end -}} +*/}} \ No newline at end of file diff --git a/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl b/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl index 549c9de434..0f22da234c 100644 --- a/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl +++ b/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl @@ -62,13 +62,9 @@ spec: ssh_pwauth: true package_update: true packages: - - tmux - - htop - qemu-guest-agent - - iputils-ping - jq - rsync - - fio - bind9-dnsutils users: - default diff --git a/test/dvp-static-cluster/charts/infra/templates/ingress.yaml b/test/dvp-static-cluster/charts/infra/templates/ingress.yaml index df81d5326d..ccffd4b232 100644 --- a/test/dvp-static-cluster/charts/infra/templates/ingress.yaml +++ b/test/dvp-static-cluster/charts/infra/templates/ingress.yaml @@ -11,7 +11,7 @@ spec: protocol: TCP name: http selector: - group: master + group: {{ printf "%s-%s" $.Values.storage_type "master" }} --- apiVersion: v1 kind: Service @@ -25,7 +25,7 @@ spec: protocol: TCP name: https selector: - group: master + group: {{ printf "%s-%s" $.Values.storage_type "master" }} --- apiVersion: networking.k8s.io/v1 kind: Ingress diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml index e76f76dbd0..419e62478b 100644 --- a/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml @@ -29,7 +29,7 @@ spec: - containerPort: 2222 env: - name: SSH_KEY - value: "{{ .Values.sshPublicKey }}" + value: "{{ .Values.discovered.publicSSHKey }}" securityContext: runAsNonRoot: true runAsUser: 1000 diff --git a/test/dvp-static-cluster/charts/infra/templates/vms.yaml b/test/dvp-static-cluster/charts/infra/templates/vms.yaml index 6da53b3ca5..a55beaac3d 100644 --- a/test/dvp-static-cluster/charts/infra/templates/vms.yaml +++ b/test/dvp-static-cluster/charts/infra/templates/vms.yaml @@ -1,12 +1,12 @@ -{{range $_, $i := untilStep 0 (.Values.instances.masterNodes.count | int) 1}} - {{- $vmName := printf "master-%d" $i -}} +{{- range $_, $i := untilStep 0 (.Values.instances.masterNodes.count | int) 1}} + {{- $vmName := printf "%s-master-%d" $.Values.storage_type $i -}} {{ include "infra.vm" (list $ $vmName $.Values.instances.masterNodes.cfg) | nindent 0 }} {{- end }} -{{range $_, $v := .Values.instances.additionalNodes }} +{{- range $_, $v := .Values.instances.additionalNodes }} {{range $_, $i := untilStep 0 ($v.count | int) 1}} - {{- $vmName := printf "%s-%d" $v.name $i -}} + {{- $vmName := printf "%s-%s-%d" $.Values.storage_type $v.name $i -}} {{ include "infra.vm" (list $ $vmName $v.cfg) | nindent 0}} {{- end }} {{- end }} diff --git a/test/dvp-static-cluster/tools/deckhouse-queue.sh 
b/test/dvp-static-cluster/tools/deckhouse-queue.sh new file mode 100644 index 0000000000..cada5c5a46 --- /dev/null +++ b/test/dvp-static-cluster/tools/deckhouse-queue.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +get_current_date() { + date +"%H:%M:%S %d-%m-%Y" +} + +get_timestamp() { + date +%s +} + +log_info() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${BLUE}[INFO]${NC} $message" +} + +log_success() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${GREEN}[SUCCESS]${NC} $message" +} + +log_warning() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${YELLOW}[WARNING]${NC} $message" +} + +log_error() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${RED}[ERROR]${NC} $message" +} + +kubectl() { + /opt/deckhouse/bin/kubectl $@ + # sudo /opt/deckhouse/bin/kubectl $@ +} + +d8() { + /opt/deckhouse/bin/d8 $@ + # sudo /opt/deckhouse/bin/d8 $@ +} + + +d8_queue_main() { + echo "$( d8 p queue main | grep -Po '(?<=length )([0-9]+)' )" +} + +d8_queue_list() { + d8 p queue list | grep -Po '([0-9]+)(?= active)' +} + +d8_queue() { + local count=90 + # local main_queue_ready=false + local list_queue_ready=false + + for i in $(seq 1 $count) ; do + # if [ $(d8_queue_main) == "0" ]; then + # echo "main queue is clear" + # main_queue_ready=true + # else + # echo "Show main queue" + # d8 p queue main | head -n25 || echo "Failed to retrieve main queue" + # fi + + if [ $(d8_queue_list) == "0" ]; then + echo "list queue list is clear" + list_queue_ready=true + else + echo "Show queue list" + d8 p queue list | head -n25 || echo "Failed to retrieve queue" + fi + + if [ "$list_queue_ready" = true ]; then + # if [ "$main_queue_ready" = true ] && [ "$list_queue_ready" = true ]; then + break + fi + echo "Wait until queues are empty ${i}/${count}" + sleep 10 + done +} + +d8_ready() { + local ready=false + local count=60 + common_start_time=$(get_timestamp) + for i in $(seq 1 $count) ; do + start_time=$(get_timestamp) + if kubectl -n d8-system wait deploy/deckhouse --for condition=available --timeout=20s 2>/dev/null; then + ready=true + break + fi + end_time=$(get_timestamp) + difference=$((end_time - start_time)) + log_info "Wait until deckhouse is ready ${i}/${count} after ${difference}s" + if (( i % 5 == 0 )); then + kubectl -n d8-system get pods + d8 p queue list | head -n25 || echo "Failed to retrieve queue" + fi + done + + if [ "$ready" = true ]; then + log_success "Deckhouse is Ready!" 
+ echo "Checking queues" + d8_queue + else + common_end_time=$(get_timestamp) + common_difference=$((common_end_time - common_start_time)) + common_formatted_difference=$(date -u +'%H:%M:%S' -d "@$common_difference") + log_error "Deckhouse is not ready after ${count} attempts and ${common_formatted_difference} time, check its queue for errors:" + d8 p queue main | head -n25 + exit 1 + fi +} + +start_time=$(get_timestamp) +log_info "Checking that deckhouse is ready" +d8_ready +end_time=$(get_timestamp) +difference=$((end_time - start_time)) +log_success "Deckhouse is ready after $(date -ud "@$difference" +'%H:%M:%S')" From d9ce80a422937a631f33380a5cb6ca9c0a139023 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Thu, 18 Dec 2025 17:32:55 +0300 Subject: [PATCH 10/71] remove setup via dhctl cloud provider and leave only static Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 13 +- .github/workflows/e2e-reusable-pipeline.yml | 222 +++++++--------- test/dvp-over-dvp/Taskfile.yaml | 244 ----------------- .../charts/cluster-config/.helmignore | 23 -- .../charts/cluster-config/Chart.yaml | 24 -- .../templates/cluster-config.yaml | 82 ------ .../templates/disabled-modules.yaml | 11 - .../cluster-config/templates/ingress.yaml | 17 -- .../charts/cluster-config/templates/mc.yaml | 96 ------- .../charts/cluster-config/templates/nfs.yaml | 35 --- .../charts/cluster-config/templates/ngc.yaml | 54 ---- .../charts/cluster-config/templates/rbac.yaml | 20 -- test/dvp-over-dvp/charts/infra/.helmignore | 23 -- test/dvp-over-dvp/charts/infra/Chart.yaml | 24 -- .../charts/infra/templates/ingress.yaml | 74 ------ .../infra/templates/jump-host/deploy.yaml | 38 --- .../charts/infra/templates/jump-host/svc.yaml | 14 - .../infra/templates/nfs-server/deploy.yaml | 44 ---- .../infra/templates/nfs-server/pvc.yaml | 15 -- .../infra/templates/nfs-server/svc.yaml | 21 -- .../charts/infra/templates/ns.yaml | 4 - .../charts/infra/templates/rbac/rbac.yaml | 28 -- .../charts/infra/templates/vi.yaml | 12 - .../charts/infra/templates/vmc.yaml | 7 - test/dvp-over-dvp/nested-sa-config/gen-sa.sh | 184 ------------- test/dvp-over-dvp/storage/ceph/00-ms.yaml | 10 - test/dvp-over-dvp/storage/ceph/01-mc.yaml | 36 --- test/dvp-over-dvp/storage/ceph/02-sa.yaml | 74 ------ test/dvp-over-dvp/storage/ceph/03-cm.yaml | 245 ------------------ .../dvp-over-dvp/storage/ceph/04-cluster.yaml | 111 -------- .../storage/ceph/05-blockpool.yaml | 13 - .../dvp-over-dvp/storage/ceph/06-toolbox.yaml | 83 ------ .../storage/ceph/ceph-configure.sh | 85 ------ .../storage/sds-replicated/lvg-gen.sh | 57 ---- .../storage/sds-replicated/mc.yaml | 32 --- .../storage/sds-replicated/rsc-gen.sh | 87 ------- test/dvp-over-dvp/tools/deckhouse-queue.sh | 145 ----------- test/dvp-static-cluster/Taskfile.yaml | 184 ++----------- .../templates/cluster-config.yaml | 20 +- .../templates/disabled-modules.yaml | 2 +- .../templates/modules-dvp-base.yaml | 76 ------ .../templates/modules-minimal.yaml | 1 - .../charts/cluster-config/templates/nfs.yaml | 43 --- .../charts/cluster-config/templates/ngc.yaml | 2 +- .../templates/virtualization.yaml | 33 --- .../infra/templates/jump-host/ingress.yaml | 40 --- .../storage/ceph/01-mc.yaml | 12 +- 47 files changed, 126 insertions(+), 2594 deletions(-) delete mode 100644 test/dvp-over-dvp/Taskfile.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/.helmignore delete mode 100644 test/dvp-over-dvp/charts/cluster-config/Chart.yaml delete mode 100644 
test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml delete mode 100644 test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/.helmignore delete mode 100644 test/dvp-over-dvp/charts/infra/Chart.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/ingress.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/ns.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/vi.yaml delete mode 100644 test/dvp-over-dvp/charts/infra/templates/vmc.yaml delete mode 100644 test/dvp-over-dvp/nested-sa-config/gen-sa.sh delete mode 100644 test/dvp-over-dvp/storage/ceph/00-ms.yaml delete mode 100644 test/dvp-over-dvp/storage/ceph/01-mc.yaml delete mode 100644 test/dvp-over-dvp/storage/ceph/02-sa.yaml delete mode 100644 test/dvp-over-dvp/storage/ceph/03-cm.yaml delete mode 100644 test/dvp-over-dvp/storage/ceph/04-cluster.yaml delete mode 100644 test/dvp-over-dvp/storage/ceph/05-blockpool.yaml delete mode 100644 test/dvp-over-dvp/storage/ceph/06-toolbox.yaml delete mode 100644 test/dvp-over-dvp/storage/ceph/ceph-configure.sh delete mode 100755 test/dvp-over-dvp/storage/sds-replicated/lvg-gen.sh delete mode 100644 test/dvp-over-dvp/storage/sds-replicated/mc.yaml delete mode 100644 test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh delete mode 100644 test/dvp-over-dvp/tools/deckhouse-queue.sh delete mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml delete mode 100644 test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml delete mode 100644 test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index fb86317f67..0ffea65124 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -37,11 +37,10 @@ jobs: with: storage_type: ceph nested_storageclass_name: nested-ceph-pool-r2-csi-rbd - default_cluster_storageclass: ceph-pool-r2-csi-rbd-immediate branch: main virtualization_tag: main deckhouse_tag: main - default_user: ubuntu + default_user: cloud go_version: "1.24.6" e2e_timeout: "3h" secrets: @@ -56,11 +55,10 @@ jobs: with: storage_type: replicated nested_storageclass_name: nested-thin-r1 - default_cluster_storageclass: ceph-pool-r2-csi-rbd-immediate branch: main virtualization_tag: main deckhouse_tag: main - default_user: ubuntu + default_user: cloud go_version: "1.24.6" e2e_timeout: "3h" secrets: @@ -69,11 +67,10 @@ jobs: PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} BOOTSTRAP_DEV_PROXY: ${{ secrets.BOOTSTRAP_DEV_PROXY }} - report-to-channel: 
runs-on: ubuntu-latest name: End-to-End tests report - needs: + needs: - e2e-ceph - e2e-replicated if: ${{ always()}} @@ -201,10 +198,10 @@ jobs: COMBINED_SUMMARY+="${markdown_table}\n" echo -e "$COMBINED_SUMMARY" - + # Send to channel if webhook is configured if [ -n "$LOOP_WEBHOOK_URL" ]; then curl --request POST --header 'Content-Type: application/json' --data "{\"text\": \"${COMBINED_SUMMARY}\"}" "$LOOP_WEBHOOK_URL" fi env: - LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }} \ No newline at end of file + LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }} diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index bdaf8e1069..d57437da06 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -20,15 +20,11 @@ on: storage_type: required: true type: string - description: "Storage type (ceph or replicated)" + description: "Storage type (ceph, replicated, etc.)" nested_storageclass_name: required: true type: string description: "Nested storage class name" - default_cluster_storageclass: - required: true - type: string - description: "Default cluster storage class" branch: required: false type: string @@ -48,7 +44,7 @@ on: required: false type: string default: "ubuntu" - description: "Default user" + description: "Default user for VMs" go_version: required: false type: string @@ -73,16 +69,13 @@ on: description: "E2E test results" value: ${{ jobs.e2e-test.outputs.report-summary }} - env: BRANCH: ${{ inputs.branch }} VIRTUALIZATION_TAG: ${{ inputs.virtualization_tag }} DECKHOUSE_TAG: ${{ inputs.deckhouse_tag }} - # DEFAULT_USER: ${{ inputs.default_user }} - DEFAULT_USER: cloud + DEFAULT_USER: ${{ inputs.default_user }} GO_VERSION: ${{ inputs.go_version }} - # CLUSTER_TYPE_PATH: ${{ env.CLUSTER_TYPE_PATH }} - CLUSTER_TYPE_PATH: test/dvp-static-cluster + SETUP_CLUSTER_TYPE_PATH: test/dvp-static-cluster defaults: run: @@ -150,7 +143,7 @@ jobs: kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} - name: Generate values.yaml - working-directory: ${{ env.CLUSTER_TYPE_PATH }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} run: | defaultStorageClass=$(kubectl get storageclass -o json \ | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class" == "true") | .metadata.name') @@ -159,25 +152,29 @@ namespace: ${{ steps.vars.outputs.namespace }} storage_type: ${{ inputs.storage_type }} storageClass: ${defaultStorageClass} - nfsEnabled: false - nfsSC: nested-nfs-${{ inputs.storage_type }}-${{ steps.vars.outputs.sha_short }} - defaultClusterStorageClass: ${{ inputs.default_cluster_storageclass }} - clusterConfigurationPrefix: ${{ inputs.storage_type }} sa: dkp-sa deckhouse: tag: ${{ env.DECKHOUSE_TAG }} kubernetesVersion: Automatic registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} - httpProxy: ${{ secrets.BOOTSTRAP_DEV_PROXY }} bundle: Default + proxy: + httpProxy: ${{ secrets.BOOTSTRAP_DEV_PROXY }} + httpsProxy: ${{ secrets.BOOTSTRAP_DEV_PROXY }} + noProxy: + - "localhost" + - "127.0.0.1" + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + - "10.112.0.0/16" + - "10.223.0.0/16" + - "docker.io" + - ".ubuntu.com" image: url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img - # url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img defaultUser: ${{ env.DEFAULT_USER }} bootloader: BIOS - modules: - virtualizationEnabled: false - sdsReplicatedVolumeEnabled: false
ingressHosts: - api - grafana @@ -208,48 +205,34 @@ jobs: - size: 50Gi EOF - # instances: - # masterNodes: - # count: 1 - # cores: 8 - # coreFraction: 50% - # memory: 14Gi - # additionalNodes: - # - name: worker - # count: 3 - # cores: 10 - # coreFraction: 25% - # memory: 8Gi - # bootloader: BIOS - name: Bootstrap cluster [infra-deploy] - working-directory: ${{ env.CLUSTER_TYPE_PATH }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} run: | task infra-deploy - name: Bootstrap cluster [dhctl-bootstrap] id: dhctl-bootstrap - working-directory: ${{ env.CLUSTER_TYPE_PATH }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} run: | task dhctl-bootstrap timeout-minutes: 30 - name: Bootstrap cluster [show-connection-info] - working-directory: ${{ env.CLUSTER_TYPE_PATH }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} run: | task show-connection-info - + - name: Save ssh to secrets in cluster env: NAMESPACE: ${{ steps.vars.outputs.namespace }} if: always() run: | - kubectl -n $NAMESPACE create secret generic ssh-key --from-file=${{ env.CLUSTER_TYPE_PATH }}/tmp/ssh/cloud + kubectl -n $NAMESPACE create secret generic ssh-key --from-file=${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/ssh/cloud - name: Get info about nested master VM - working-directory: ${{ env.CLUSTER_TYPE_PATH }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} env: NAMESPACE: ${{ steps.vars.outputs.namespace }} PREFIX: ${{ inputs.storage_type }} run: | - # nested_master=$(kubectl -n ${NAMESPACE} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") echo "Pods" @@ -284,14 +267,12 @@ jobs: - name: Generate nested kubeconfig id: generate-kubeconfig - working-directory: ${{ env.CLUSTER_TYPE_PATH }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} env: kubeConfigPath: tmp/kube.config NAMESPACE: ${{ steps.vars.outputs.namespace }} PREFIX: ${{ inputs.storage_type }} run: | - # nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") - nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") d8vscp() { @@ -322,41 +303,49 @@ jobs: d8vscp "./tools/deckhouse-queue.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/deckhouse-queue.sh" echo "" - d8 v ssh -i ./tmp/ssh/cloud \ - --local-ssh=true \ - --local-ssh-opts="-o StrictHostKeyChecking=no" \ - --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ - -c 'chmod +x /tmp/{gen-sa.sh,deckhouse-queue.sh}' - echo "" - - d8 v ssh -i ./tmp/ssh/cloud \ - --local-ssh=true \ - --local-ssh-opts="-o StrictHostKeyChecking=no" \ - --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ - -c 'ls -la /tmp/' - echo "===" - + echo "Set file exec permissions" + d8vssh 'chmod +x /tmp/{gen-sa.sh,deckhouse-queue.sh}' + d8vssh 'ls -la /tmp/' echo "Check d8 queue" - d8 v ssh -i ./tmp/ssh/cloud \ - --local-ssh=true \ - --local-ssh-opts="-o StrictHostKeyChecking=no" \ - --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ - -c 'sudo /tmp/deckhouse-queue.sh' + d8vssh 'sudo /tmp/deckhouse-queue.sh' + + # d8 v ssh -i ./tmp/ssh/cloud \ + # --local-ssh=true \ + # --local-ssh-opts="-o StrictHostKeyChecking=no" \ + # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + # 
${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + # -c 'chmod +x /tmp/{gen-sa.sh,deckhouse-queue.sh}' + # echo "" + + # d8 v ssh -i ./tmp/ssh/cloud \ + # --local-ssh=true \ + # --local-ssh-opts="-o StrictHostKeyChecking=no" \ + # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + # ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + # -c 'ls -la /tmp/' + # echo "===" + + # echo "Check d8 queue" + # d8 v ssh -i ./tmp/ssh/cloud \ + # --local-ssh=true \ + # --local-ssh-opts="-o StrictHostKeyChecking=no" \ + # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + # ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + # -c 'sudo /tmp/deckhouse-queue.sh' echo "Generate kube conf in nested cluster" echo "run nested-sa-config/gen-sa.sh" # "Usage: gen-sa.sh [FILE_NAME]" echo "===" - d8 v ssh -i ./tmp/ssh/cloud \ - --local-ssh=true \ - --local-ssh-opts="-o StrictHostKeyChecking=no" \ - --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - ${DEFAULT_USER}@${nested_master}.$NAMESPACE \ - -c "sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}" + # d8 v ssh -i ./tmp/ssh/cloud \ + # --local-ssh=true \ + # --local-ssh-opts="-o StrictHostKeyChecking=no" \ + # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + # ${DEFAULT_USER}@${nested_master}.$NAMESPACE \ + # -c "sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}" + + d8vssh "sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}" echo "'sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}' - done" echo "" @@ -381,7 +370,6 @@ jobs: NAMESPACE: ${{ steps.vars.outputs.namespace }} PREFIX: ${{ inputs.storage_type }} run: | - # nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") d8vscp() { @@ -395,16 +383,16 @@ jobs: echo "d8vscp: $source -> $dest - done" } - d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/var/log/cloud-init*.log" "./${{ env.CLUSTER_TYPE_PATH }}/tmp/" + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/var/log/cloud-init*.log" "./${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/" - name: Prepare artifact if: always() run: | - sudo chown -fR 1001:1001 ${{ env.CLUSTER_TYPE_PATH }} - yq e '.deckhouse.registryDockerCfg = "None"' -i ./${{ env.CLUSTER_TYPE_PATH }}/values.yaml - yq e 'select(.kind == "InitConfiguration") .deckhouse.registryDockerCfg = "None"' -i ./${{ env.CLUSTER_TYPE_PATH }}/tmp/config.yaml - echo "${{ steps.generate-kubeconfig.outputs.config }}" | base64 -d | base64 -d > ./${{ env.CLUSTER_TYPE_PATH }}/kube-config - + sudo chown -fR 1001:1001 ${{ env.SETUP_CLUSTER_TYPE_PATH }} + yq e '.deckhouse.registryDockerCfg = "None"' -i ./${{ env.SETUP_CLUSTER_TYPE_PATH }}/values.yaml + yq e 'select(.kind == "InitConfiguration") .deckhouse.registryDockerCfg = "None"' -i ./${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/config.yaml || echo "The config.yaml file is not generated, skipping" + echo "${{ steps.generate-kubeconfig.outputs.config }}" | base64 -d | base64 -d > ./${{ env.SETUP_CLUSTER_TYPE_PATH }}/kube-config + - name: Upload generated files uses: actions/upload-artifact@v4 id: artifact-upload @@ -412,8 +400,8 @@ jobs: with: name: generated-files-${{ inputs.storage_type }} path: | - ${{ env.CLUSTER_TYPE_PATH }}/tmp - ${{ env.CLUSTER_TYPE_PATH }}/values.yaml + ${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp + ${{ env.SETUP_CLUSTER_TYPE_PATH }}/values.yaml overwrite: true include-hidden-files: true 
retention-days: 1 @@ -424,18 +412,18 @@ jobs: if: always() with: name: generated-files-ssh-${{ inputs.storage_type }} - path: ${{ env.CLUSTER_TYPE_PATH }}/tmp/ssh + path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/ssh overwrite: true include-hidden-files: true retention-days: 1 - + - name: Upload kubeconfig config uses: actions/upload-artifact@v4 id: artifact-upload-kubeconfig if: always() with: name: generated-files-kubeconfig-${{ inputs.storage_type }} - path: ${{ env.CLUSTER_TYPE_PATH }}/kube-config + path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/kube-config overwrite: true include-hidden-files: true retention-days: 1 @@ -485,13 +473,11 @@ jobs: - name: Configure replicated storage if: ${{ inputs.storage_type == 'replicated' }} - working-directory: ${{ env.CLUSTER_TYPE_PATH }}/storage/sds-replicated + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/sds-replicated run: | kubectl apply -f mc.yaml echo "Wait for sds-node-configurator" kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-node-configurator --timeout=300s - # echo "Wait for sds-replicated" - # kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-replicated-volume --timeout=300s for i in {1..60}; do sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") @@ -555,11 +541,10 @@ jobs: echo "Waiting 10s for sds-replicated-volume to be ready" if (( i % 5 == 0 )); then - echo "= Get pods =" + echo "[DEBUG] Get pods" kubectl -n d8-sds-replicated-volume get pods || true - echo "Show queue" - d8 p queue list | head -n25 || echo "Failed to retrieve list queue" - echo "=====" + echo "[DEBUG] Show queue (first 25 lines)" + d8 p queue list | head -n 25 || echo "Failed to retrieve list queue" fi done @@ -568,12 +553,13 @@ jobs: chmod +x rsc-gen.sh ./rsc-gen.sh - - echo "====== Show nested storageclasses =======" + + echo "Enshure that nested storageclasses are created" kubectl get sc | grep nested || echo "No nested storageclasses" echo "Done" - name: Configure ceph storage if: ${{ inputs.storage_type == 'ceph' }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/ceph run: | d8_queue_list() { d8 p queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" @@ -604,18 +590,19 @@ jobs: done } - cd ${{ env.CLUSTER_TYPE_PATH }}/storage/ceph export registry=${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} yq e '.spec.registry.dockerCfg = env(registry)' -i 00-ms.yaml unset registry echo "Create prod module source" kubectl apply -f 00-ms.yaml - kubectl get ms + kubectl wait --for=jsonpath='{.status.phase}' modulesource deckhouse-prod --timeout=30s + kubectl get modulesources echo "Create ceph operator and csi module config" kubectl apply -f 01-mc.yaml + echo "Wait while queues are empty" d8_queue echo "Start wait for ceph operator and csi" @@ -668,20 +655,16 @@ jobs: echo "Waiting 10s for ceph operator to be ready" kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" if (( i % 5 == 0 )); then - echo "= Get ceph ns =" + echo "[DEBUG] Show ceph ns" kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" - echo "= Get mc =" + echo "[DEBUG] Show mc ceph" kubectl get mc | grep ceph || echo "Failed to retrieve mc" - echo "= Get modules =" + echo "[DEBUG] Show ceph in resource modules" kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" - echo "=====" - echo "Show queue" - d8 p queue list | head -n25 || echo "Failed to retrieve list queue" - echo "=====" + echo "[DEBUG] Show queue" + d8 
p queue list | head -n 25 || echo "Failed to retrieve list queue" fi - echo "====" - echo "Wait until all necessary pods are ready ${i}/60" - echo "====" + echo "[INFO] Wait until all necessary pods are ready ${i}/60" sleep 10 done @@ -693,9 +676,8 @@ jobs: echo "Wait for rook-ceph-tools, timeout 300s" kubectl -n d8-operator-ceph wait --for=condition=Available deployment/rook-ceph-tools --timeout=300s - echo "-- ls ceph pool --" + echo "Show ceph pools via rook-ceph-tools" kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph osd pool ls - echo "------" echo "Configure storage class" chmod +x ./ceph-configure.sh @@ -760,9 +742,6 @@ jobs: kubectl get mpo virtualization - name: Wait for Virtualization to be ready run: | - echo "Waiting for Virtualization module to be ready" - # kubectl wait --for=jsonpath='{.status.phase}'=Ready modules virtualization --timeout=300s - d8_queue_list() { d8 p queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" } @@ -796,16 +775,14 @@ jobs: sleep 10 done } - echo "Checking virtualization module is on" + echo "Waiting for Virtualization module to be ready" if [ "$(kubectl get mc virtualization -o jsonpath='{.spec.enabled}')" != "true" ]; then echo "Virtualization module is not enabled" echo "Enabling virtualization module" kubectl patch mc virtualization -p '{"spec":{"enabled": true}}' --type merge fi - + d8_queue - - # kubectl -n d8-virtualization get pods || echo "ns virtualization is not ready" for i in {1..60}; do virtualization_status=$(kubectl get modules virtualization -o jsonpath='{.status.phase}') @@ -823,9 +800,6 @@ jobs: fi sleep 10 done - - # echo "Wait for pods to be ready" - # kubectl wait --for=condition=Ready pods --all -n d8-virtualization --timeout=600s e2e-test: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 @@ -890,14 +864,7 @@ jobs: export SKIP_IMMEDIATE_SC_CHECK="yes" fi STORAGE_CLASS_NAME=${{ inputs.nested_storageclass_name }} FOCUS="VirtualMachineConfiguration" task run:ci -v LABELS="Slow" - - # - uses: actions/upload-artifact@v4 - # if: always() - # with: - # name: resources_from_failed_tests_${{ inputs.storage_type }} - # path: ${{ runner.temp }}/e2e_failed__* - # if-no-files-found: ignore - + - name: Save results working-directory: ./test/e2e/ id: report @@ -936,7 +903,6 @@ jobs: path: test/e2e/e2e_summary_${{ inputs.storage_type }}.json if-no-files-found: ignore - undeploy-cluster: name: Undeploy cluster (${{ inputs.storage_type }}) runs-on: ubuntu-latest @@ -968,7 +934,7 @@ jobs: uses: actions/download-artifact@v5 with: name: generated-files-${{ inputs.storage_type }} - path: ${{ env.CLUSTER_TYPE_PATH }}/ + path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/ - name: Configure kubectl via azure/k8s-set-context@v4 uses: azure/k8s-set-context@v4 @@ -978,6 +944,6 @@ jobs: kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} - name: infra-undeploy - working-directory: ${{ env.CLUSTER_TYPE_PATH }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} run: | task infra-undeploy diff --git a/test/dvp-over-dvp/Taskfile.yaml b/test/dvp-over-dvp/Taskfile.yaml deleted file mode 100644 index cdc348e487..0000000000 --- a/test/dvp-over-dvp/Taskfile.yaml +++ /dev/null @@ -1,244 +0,0 @@ -# https://taskfile.dev - -version: "3" - -vars: - NAMESPACE: - sh: yq eval '.namespace' values.yaml - DEFAULT_USER: - sh: yq eval '.image.defaultUser' values.yaml - TMP_DIR: ./tmp - SSH_DIR: "{{ .TMP_DIR }}/ssh" - SSH_FILE_NAME: cloud - SSH_PUB_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.pub" 
- SSH_PRIV_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}" - GENERATED_VALUES_FILE: tmp/generated-values.yaml - PASSWORD_FILE: "{{ .TMP_DIR }}/password.txt" - PASSWORD_HASH_FILE: "{{ .TMP_DIR }}/password-hash.txt" -tasks: - default: - silent: true - desc: Preflight / Check if all dependencies are installed - cmds: - - | - deps=("kubectl" "jq" "yq" "docker" "helm" "htpasswd") - for dep in "${deps[@]}"; do - if ! command -v "$dep" >/dev/null 2>&1; then - echo "Required utility '$dep' not found!" - exit 1 - fi - done - echo "All dependencies are installed!" - - password-gen: - desc: Preflight / Generate password for admin@deckhouse.io user - cmds: - - mkdir -p {{ .TMP_DIR }} - - date +%s | sha256sum | base64 | head -c 10 > {{ .PASSWORD_FILE }} - - | - echo $(cat {{ .TMP_DIR }}/password.txt) | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 > {{ .PASSWORD_HASH_FILE }} - status: - - test -f "{{ .PASSWORD_FILE }}" - - test -f "{{ .PASSWORD_HASH_FILE }}" - - ssh-gen: - desc: Preflight / Generate ssh keypair for jump-host - cmds: - - mkdir -p "{{ .SSH_DIR }}" - - yes | ssh-keygen -t ed25519 -b 1024 -f {{ .SSH_PRIV_KEY_FILE }} -N "" -C "cloud" -v - - chmod 0600 "{{ .SSH_PUB_KEY_FILE }}" - - chmod 0400 "{{ .SSH_PRIV_KEY_FILE }}" - status: - - test -f "{{ .SSH_PRIV_KEY_FILE }}" - - render-infra: - desc: Preparation / Generate infra manifests - deps: - - ssh-gen - cmds: - - touch {{ .GENERATED_VALUES_FILE }} - - | - export NEW_KUBECONFIG_B64="$(cat {{ .SSH_PUB_KEY_FILE }})" - yq eval --inplace '.sshPublicKey = env(NEW_KUBECONFIG_B64)' {{ .GENERATED_VALUES_FILE }} - - | - export DOMAIN=$(kubectl get mc global -o json | jq '.spec.settings.modules.publicDomainTemplate | split(".")[1:] | join(".")') - yq eval --inplace '.domain = env(DOMAIN)' {{ .GENERATED_VALUES_FILE }} - - helm template dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/infra.yaml - - infra-deploy: - deps: - - render-infra - desc: Deploy infra (Namespace/RBAC/Jumphost) - vars: - start_time: - sh: date +%s - cmds: - - kubectl apply -f {{ .TMP_DIR }}/infra.yaml - - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=jump-host --timeout=300s - # - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=nfs-server --timeout=300s - - | - export end_time=$(date +%s) - difference=$((end_time - {{.start_time}})) - date -ud "@$difference" +'%H:%M:%S' - - infra-undeploy: - desc: Destroy infra (Namespace/RBAC/Jumphost/...) 
- cmds: - - kubectl delete -f {{ .TMP_DIR }}/infra.yaml || true - - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true - - render-kubeconfig: - desc: Preparation / Generate kubeconfig (infra required) - vars: - SERVER: - sh: echo https://$(kubectl -n d8-user-authn get ingress kubernetes-api -o json | jq .spec.rules[0].host -r) - CERT: - sh: kubectl -n d8-user-authn get secrets kubernetes-tls -o json | jq '.data."tls.crt"' -r - TOKEN: - sh: kubectl -n {{ .NAMESPACE }} get secret dkp-sa-secret -ojson | jq -r '.data.token' | base64 -d - silent: true - cmds: - - | - cat < {{ .TMP_DIR }}/kubeconfig.yaml - apiVersion: v1 - clusters: - - cluster: - server: {{ .SERVER }} - name: dvp - contexts: - - context: - cluster: dvp - namespace: {{ .NAMESPACE }} - user: {{ .NAMESPACE }}@dvp - name: {{ .NAMESPACE }}@dvp - current-context: {{ .NAMESPACE }}@dvp - kind: Config - preferences: {} - users: - - name: {{ .NAMESPACE }}@dvp - user: - token: {{ .TOKEN }} - EOF - - render-cluster-config: - desc: Preparation / Generate cluster config (infra required) - deps: - - render-kubeconfig - - password-gen - cmds: - - touch {{ .GENERATED_VALUES_FILE }} - - | - export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" - yq eval --inplace '.passwordHash = env(PASSWORD_HASH)' {{ .GENERATED_VALUES_FILE }} - - | - export NEW_KUBECONFIG_B64="$(cat {{ .TMP_DIR }}/kubeconfig.yaml | base64 -w 0)" - yq eval --inplace '.kubeconfigDataBase64 = env(NEW_KUBECONFIG_B64)' {{ .GENERATED_VALUES_FILE }} - - helm template dvp-over-dvp-cluster-config ./charts/cluster-config -f values.yaml -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml - - dhctl-bootstrap: - desc: Bootstrap DKP over DVP - deps: - - render-cluster-config - vars: - start_time: - sh: date +%s - JUMPHOST_EXT_IP: - sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short - JUMPHOST_NODEPORT: - sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq ".spec.ports[] | select(.port==2222) | .nodePort" - cmds: - - | - docker run --pull=always \ - -v "{{ .TMP_DIR }}/config.yaml:/config.yaml" \ - -v "{{ .SSH_DIR }}:/tmp/.ssh/" \ - -v "{{ .TMP_DIR }}/dhctl:/tmp/dhctl/" \ - dev-registry.deckhouse.io/sys/deckhouse-oss/install:{{ .DECKHOUSE_TAG }} \ - dhctl bootstrap \ - --config=/config.yaml \ - --ssh-agent-private-keys=/tmp/.ssh/{{ .SSH_FILE_NAME }} \ - --ssh-user={{ .DEFAULT_USER }} \ - --ssh-bastion-port={{ .JUMPHOST_NODEPORT }} \ - --ssh-bastion-host={{ .JUMPHOST_EXT_IP }} \ - --ssh-bastion-user=user \ - {{.CLI_ARGS}} - - | - export end_time=$(date +%s) - difference=$((end_time - {{.start_time}})) - date -ud "@$difference" +'%H:%M:%S' - - show-connection-info: - desc: Show connection info - vars: - DOMAIN: - sh: yq eval '.domain' {{ .GENERATED_VALUES_FILE }} - PASSWORD: - sh: cat {{ .PASSWORD_FILE }} - silent: true - cmds: - - echo "Connect to master task ssh-to-master" - - echo "Grafana URL https://grafana.{{ .NAMESPACE }}.{{ .DOMAIN }}" - - echo "Default user/password admin@deckhouse.io/{{ .PASSWORD}}" - - install: - cmds: - - task: infra-deploy - - task: dhctl-bootstrap - - task: show-connection-info - - ssh-to-master: - desc: ssh to master - vars: - MASTER_NAME: - sh: kubectl -n {{ .NAMESPACE }} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}" - cmds: - - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true {{ .MASTER_NAME }}.{{ .NAMESPACE }} 22' {{ 
.DEFAULT_USER }}@{{ .MASTER_NAME }} - - kill-dvp-resources: - cmds: - - kubectl -n {{ .NAMESPACE }} delete vm --all --force --grace-period=0 - - kubectl -n {{ .NAMESPACE }} delete vd --all --force --grace-period=0 - - kubectl -n {{ .NAMESPACE }} delete vmip --all --force --grace-period=0 - - clean: - cmds: - - task: infra-undeploy - - rm -rf "{{ .TMP_DIR }}" - - __ssh-command: - silent: true - internal: true - vars: - MASTER_NAME: - sh: kubectl -n {{ .NAMESPACE }} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}" - cmds: - - /usr/bin/ssh -t -i {{ .SSH_PRIV_KEY_FILE }} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true {{ .MASTER_NAME }}.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@{{ .MASTER_NAME }} {{ .CMD }} - - kubectl: - desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" - cmds: - - task: __ssh-command - vars: - CMD: sudo /opt/deckhouse/bin/kubectl {{ .CLI_ARGS }} - - k9s: - desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" - cmds: - - task: __ssh-command - vars: - CMD: TERM=xterm-256color sudo /usr/local/bin/k9s {{ .CLI_ARGS }} - - configure:cluster:sa: - desc: Configure kubeconfig for nested cluster - vars: - script: gen-sa.sh - cmds: - - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" ./nested-sa-config/{{ .script }} cloud@master-0:/tmp/ - - task: __ssh-command - vars: - CMD: sudo chmod +x /tmp/{{ .script }} - - task: __ssh-command - vars: - CMD: sudo /tmp/{{ .script }} - - task: __ssh-command - vars: - CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }} diff --git a/test/dvp-over-dvp/charts/cluster-config/.helmignore b/test/dvp-over-dvp/charts/cluster-config/.helmignore deleted file mode 100644 index 0e8a0eb36f..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/test/dvp-over-dvp/charts/cluster-config/Chart.yaml b/test/dvp-over-dvp/charts/cluster-config/Chart.yaml deleted file mode 100644 index c61a43f29a..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: cluster-config -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. 
-# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml deleted file mode 100644 index 19f5e9c944..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: deckhouse.io/v1 -kind: ClusterConfiguration -clusterType: Cloud -cloud: - provider: DVP - prefix: {{ .Values.clusterConfigurationPrefix | default "e2e" }} -podSubnetCIDR: 10.112.0.0/16 -serviceSubnetCIDR: 10.223.0.0/16 -kubernetesVersion: "{{ .Values.deckhouse.kubernetesVersion }}" -clusterDomain: "internal.cluster.local" -proxy: - httpProxy: "{{ .Values.deckhouse.httpProxy }}" - httpsProxy: "{{ .Values.deckhouse.httpProxy }}" - noProxy: - - "localhost" - - "127.0.0.1" - - "10.0.0.0/8" - - "172.16.0.0/12" - - "192.168.0.0/16" - - "10.112.0.0/16" - - "10.223.0.0/16" - - docker.io - - ".ubuntu.com" ---- -apiVersion: deckhouse.io/v1 -kind: InitConfiguration -deckhouse: - imagesRepo: dev-registry.deckhouse.io/sys/deckhouse-oss - registryDockerCfg: {{ .Values.deckhouse.registryDockerCfg }} - devBranch: {{ .Values.deckhouse.tag }} ---- -apiVersion: deckhouse.io/v1 -kind: DVPClusterConfiguration -layout: Standard -sshPublicKey: {{ .Values.sshPublicKey }} -masterNodeGroup: - replicas: {{ .Values.instances.masterNodes.count }} - instanceClass: - virtualMachine: - bootloader: {{ .Values.image.bootloader }} - cpu: - cores: {{ .Values.instances.masterNodes.cores }} - coreFraction: {{ .Values.instances.masterNodes.coreFraction }} - memory: - size: {{ .Values.instances.masterNodes.memory }} - ipAddresses: - - Auto - virtualMachineClassName: "{{ .Values.namespace }}-cpu" - rootDisk: - size: 50Gi - storageClass: {{ .Values.storageClass }} - image: - kind: VirtualImage - name: image - etcdDisk: - size: 15Gi - storageClass: {{ .Values.storageClass }} -nodeGroups: -{{- range .Values.instances.additionalNodes }} - - name: {{ .name }} - replicas: {{ .count }} - instanceClass: - virtualMachine: - bootloader: {{ .bootloader }} - cpu: - cores: {{ .cores }} - coreFraction: {{ .coreFraction }} - memory: - size: {{ .memory }} - virtualMachineClassName: "{{ $.Values.namespace }}-cpu" - rootDisk: - size: 50Gi - image: - kind: VirtualImage - name: image - additionalDisks: - - size: 50Gi - storageClass: {{ $.Values.storageClass }} -{{- end }} -provider: - kubeconfigDataBase64: {{ .Values.kubeconfigDataBase64 }} - namespace: {{ .Values.namespace }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml deleted file mode 100644 index 2ae5da7f95..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{/* "local-path-provisioner" */}} -{{- $modules := list "upmeter" "pod-reloader" "secret-copier" "namespace-configurator" -}} -{{ range $modules }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: {{ . 
}} -spec: - enabled: false -{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml deleted file mode 100644 index 387a3c89bc..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: deckhouse.io/v1 -kind: IngressNginxController -metadata: - name: main -spec: - inlet: HostPort - enableIstioSidecar: false - ingressClass: nginx - hostPort: - httpPort: 80 - httpsPort: 443 - nodeSelector: - node-role.kubernetes.io/master: '' - tolerations: - - effect: NoSchedule - operator: Exists diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml deleted file mode 100644 index 369c2eb09f..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml +++ /dev/null @@ -1,96 +0,0 @@ ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: deckhouse -spec: - version: 1 - enabled: true - settings: - bundle: Default - logLevel: Info ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: global -spec: - version: 2 - settings: - {{- if .Values.defaultClusterStorageClass }} - defaultClusterStorageClass: {{ .Values.defaultClusterStorageClass }} - {{- end }} - modules: - publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.domain }}" - https: - certManager: - clusterIssuerName: selfsigned - # clusterIssuerName: letsencrypt-staging - mode: CertManager ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: user-authn -spec: - version: 1 - enabled: true - settings: - controlPlaneConfigurator: - dexCAMode: DoNotNeed - publishAPI: - enabled: true - https: - mode: Global - global: - kubeconfigGeneratorMasterCA: "" ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: user-authz -spec: - enabled: true - version: 1 ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: cni-cilium -spec: - version: 1 - enabled: true - settings: - tunnelMode: VXLAN ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: prompp -spec: - version: 1 - enabled: true ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: prompp -spec: - imageTag: stable - scanInterval: 15s ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: snapshot-controller -spec: - version: 1 - enabled: true ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: snapshot-controller -spec: - imageTag: main - scanInterval: 15s diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml deleted file mode 100644 index a14d46e181..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{ if .Values.nfsEnabled }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: csi-nfs -spec: - source: deckhouse - enabled: true - version: 1 ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: csi-nfs -spec: - imageTag: main - scanInterval: 10m ---- -apiVersion: storage.deckhouse.io/v1alpha1 -kind: NFSStorageClass -metadata: - name: {{ .Values.nfsSC }} -spec: - connection: - host: "nfs-server.{{ .Values.namespace }}.svc.cluster.local" - share: / - nfsVersion: "4.2" - mountOptions: - mountMode: hard - timeout: 60 - retransmissions: 
3 - reclaimPolicy: Delete - volumeBindingMode: Immediate -{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml deleted file mode 100644 index b3006cc249..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml +++ /dev/null @@ -1,54 +0,0 @@ ---- -apiVersion: deckhouse.io/v1alpha1 -kind: NodeGroupConfiguration -metadata: - name: qemu-guest-agent-install-ubuntu.sh -spec: - weight: 98 - nodeGroups: ["*"] - bundles: ["ubuntu-lts", "debian"] - content: | - bb-apt-install qemu-guest-agent - systemctl enable --now qemu-guest-agent ---- -apiVersion: deckhouse.io/v1alpha1 -kind: NodeGroupConfiguration -metadata: - name: astra-d8-dm-modules.conf -spec: - weight: 98 - nodeGroups: ["*"] - bundles: ["astra", "ubuntu-lts", "debian"] - content: | - bb-sync-file /etc/modules-load.d/d8-dm-modules.conf - << "EOF" - dm_snapshot - dm_thin_pool - dm_cache - EOF - - systemctl restart systemd-modules-load.service ---- -apiVersion: deckhouse.io/v1alpha1 -kind: NodeGroupConfiguration -metadata: - name: install-tools.sh -spec: - weight: 98 - nodeGroups: ["*"] - bundles: ["*"] - content: | - bb-sync-file /etc/profile.d/01-kubectl-aliases.sh - << "EOF" - source <(/opt/deckhouse/bin/kubectl completion bash) - alias k=kubectl - complete -o default -F __start_kubectl k - EOF - - if [ ! -f /usr/local/bin/k9s ]; then - K9S_URL=$(curl -s https://api.github.com/repos/derailed/k9s/releases/latest | jq '.assets[] | select(.name=="k9s_Linux_amd64.tar.gz") | .browser_download_url' -r) - curl -L "${K9S_URL}" | tar -xz -C /usr/local/bin/ "k9s" - fi - - if [ ! -f /usr/local/bin/stern ]; then - STERN_URL=$(curl -s https://api.github.com/repos/stern/stern/releases/latest | jq '.assets[].browser_download_url | select(. | test("linux_amd64"))' -r) - curl -L "${STERN_URL}" | tar -xz -C /usr/local/bin/ "stern" - fi diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml deleted file mode 100644 index 6b8998a1e8..0000000000 --- a/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: deckhouse.io/v1 -kind: ClusterAuthorizationRule -metadata: - name: admin -spec: - subjects: - - kind: User - name: admin@deckhouse.io - accessLevel: SuperAdmin - portForwarding: true ---- -apiVersion: deckhouse.io/v1 -kind: User -metadata: - name: admin -spec: - email: admin@deckhouse.io - # echo "t3chn0l0gi4" | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 - password: {{ .Values.passwordHash }} diff --git a/test/dvp-over-dvp/charts/infra/.helmignore b/test/dvp-over-dvp/charts/infra/.helmignore deleted file mode 100644 index 0e8a0eb36f..0000000000 --- a/test/dvp-over-dvp/charts/infra/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/test/dvp-over-dvp/charts/infra/Chart.yaml b/test/dvp-over-dvp/charts/infra/Chart.yaml deleted file mode 100644 index e0ab20a245..0000000000 --- a/test/dvp-over-dvp/charts/infra/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: infra -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" diff --git a/test/dvp-over-dvp/charts/infra/templates/ingress.yaml b/test/dvp-over-dvp/charts/infra/templates/ingress.yaml deleted file mode 100644 index b813234319..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/ingress.yaml +++ /dev/null @@ -1,74 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: dvp-over-dvp-80 - namespace: {{ .Values.namespace }} -spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - dvp.deckhouse.io/node-group: master ---- -apiVersion: v1 -kind: Service -metadata: - name: dvp-over-dvp-443 - namespace: {{ .Values.namespace }} -spec: - ports: - - port: 443 - targetPort: 443 - protocol: TCP - name: https - selector: - dvp.deckhouse.io/node-group: master ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: wildcard-https - namespace: {{ .Values.namespace }} - annotations: - nginx.ingress.kubernetes.io/ssl-passthrough: "true" - nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" -spec: - ingressClassName: nginx - rules: - {{- range .Values.ingressHosts }} - - host: "{{ . 
}}.{{ $.Values.namespace }}.{{ $.Values.domain }}" - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: dvp-over-dvp-443 - port: - number: 443 - {{- end }} ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: wildcard-http - namespace: {{ .Values.namespace }} - annotations: - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - ingressClassName: nginx - rules: - - host: "*.{{ .Values.namespace }}.{{ .Values.domain }}" - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: dvp-over-dvp-80 - port: - number: 80 diff --git a/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml b/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml deleted file mode 100644 index e76f76dbd0..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: jump-host - namespace: {{ .Values.namespace }} -spec: - replicas: 1 - selector: - matchLabels: - app: jump-host - template: - metadata: - labels: - app: jump-host - spec: - containers: - - name: jump-host - image: registry-dvp.dev.flant.dev/tools/jump-host:v0.1.2 - imagePullPolicy: Always - resources: - limits: - cpu: "200m" - memory: "200Mi" - requests: - cpu: "200m" - memory: "200Mi" - ports: - - containerPort: 2222 - env: - - name: SSH_KEY - value: "{{ .Values.sshPublicKey }}" - securityContext: - runAsNonRoot: true - runAsUser: 1000 - securityContext: - runAsNonRoot: true - runAsUser: 1000 diff --git a/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml b/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml deleted file mode 100644 index cacb3421ab..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: jump-host - namespace: {{ .Values.namespace }} -spec: - type: NodePort - selector: - app: jump-host - ports: - - protocol: TCP - port: 2222 - targetPort: 2222 diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml deleted file mode 100644 index 99573c35b2..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{ if .Values.nfsEnabled }} ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: nfs-server - namespace: {{ .Values.namespace }} -spec: - replicas: 1 - selector: - matchLabels: - app: nfs-server - template: - metadata: - name: nfs-server - labels: - app: nfs-server - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: nfs-server - image: itsthenetwork/nfs-server-alpine:latest - imagePullPolicy: IfNotPresent - env: - - name: SHARED_DIRECTORY - value: "/exports" - volumeMounts: - - mountPath: /exports - name: nfs-data - ports: - - name: tcp-2049 - containerPort: 2049 - protocol: TCP - - name: udp-111 - containerPort: 111 - protocol: UDP - securityContext: - privileged: true - volumes: - - name: nfs-data - persistentVolumeClaim: - claimName: nfs-data -{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml deleted file mode 100644 index 430796d9b1..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{ if .Values.nfsEnabled }} ---- -apiVersion: v1 -kind: 
PersistentVolumeClaim -metadata: - name: nfs-data - namespace: {{ .Values.namespace }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi - storageClassName: {{ .Values.storageClass }} -{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml deleted file mode 100644 index a7e850a669..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{ if .Values.nfsEnabled }} ---- -kind: Service -apiVersion: v1 -metadata: - name: nfs-server - namespace: {{ .Values.namespace }} - labels: - app: nfs-server -spec: - type: ClusterIP - selector: - app: nfs-server - ports: - - name: tcp-2049 - port: 2049 - protocol: TCP - - name: udp-111 - port: 111 - protocol: UDP -{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/ns.yaml b/test/dvp-over-dvp/charts/infra/templates/ns.yaml deleted file mode 100644 index 77db5f9f65..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/ns.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .Values.namespace }} diff --git a/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml b/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml deleted file mode 100644 index 9dec96bfa3..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.sa}} - namespace: {{ .Values.namespace }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Values.sa}}-secret - namespace: {{ .Values.namespace }} - annotations: - kubernetes.io/service-account.name: {{ .Values.sa}} -type: kubernetes.io/service-account-token ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ .Values.sa}}-rb - namespace: {{ .Values.namespace }} -subjects: - - kind: ServiceAccount - name: {{ .Values.sa}} - namespace: {{ .Values.namespace }} -roleRef: - kind: ClusterRole - name: d8:use:role:manager - apiGroup: rbac.authorization.k8s.io diff --git a/test/dvp-over-dvp/charts/infra/templates/vi.yaml b/test/dvp-over-dvp/charts/infra/templates/vi.yaml deleted file mode 100644 index 66034a649d..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/vi.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: virtualization.deckhouse.io/v1alpha2 -kind: VirtualImage -metadata: - name: image - namespace: {{ .Values.namespace }} -spec: - storage: ContainerRegistry - dataSource: - type: HTTP - http: - url: {{ .Values.image.url }} diff --git a/test/dvp-over-dvp/charts/infra/templates/vmc.yaml b/test/dvp-over-dvp/charts/infra/templates/vmc.yaml deleted file mode 100644 index 39330ced39..0000000000 --- a/test/dvp-over-dvp/charts/infra/templates/vmc.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: virtualization.deckhouse.io/v1alpha2 -kind: VirtualMachineClass -metadata: - name: "{{ .Values.namespace }}-cpu" -spec: - cpu: - type: Discovery diff --git a/test/dvp-over-dvp/nested-sa-config/gen-sa.sh b/test/dvp-over-dvp/nested-sa-config/gen-sa.sh deleted file mode 100644 index 02e01b5e55..0000000000 --- a/test/dvp-over-dvp/nested-sa-config/gen-sa.sh +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2025 Flant JSC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -get_current_date() { - date +"%H:%M:%S %d-%m-%Y" -} - -get_timestamp() { - date +%s -} - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -CYAN='\033[0;36m' -NC='\033[0m' # No Color - -log_info() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "${BLUE}[INFO]${NC} $message" - if [ -n "$LOG_FILE" ]; then - echo "[$timestamp] [INFO] $message" >> "$LOG_FILE" - fi -} - -log_success() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "${GREEN}[SUCCESS]${NC} $message" - if [ -n "$LOG_FILE" ]; then - echo "[$timestamp] [SUCCESS] $message" >> "$LOG_FILE" - fi -} - -log_warning() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "${YELLOW}[WARNING]${NC} $message" - if [ -n "$LOG_FILE" ]; then - echo "[$timestamp] [WARNING] $message" >> "$LOG_FILE" - fi -} - -log_error() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "${RED}[ERROR]${NC} $message" - if [ -n "$LOG_FILE" ]; then - echo "[$timestamp] [ERROR] $message" >> "$LOG_FILE" - fi -} - -exit_trap() { - echo "" - log_info "Exiting..." - echo "" - exit 0 -} - -kubectl() { - sudo /opt/deckhouse/bin/kubectl $@ -} - -trap exit_trap SIGINT SIGTERM - - -SA_NAME=$1 -CLUSTER_PREFIX=$2 -CLUSTER_NAME=$3 -FILE_NAME=$4 - -if [[ -z "$SA_NAME" ]] || [[ -z "$CLUSTER_PREFIX" ]] || [[ -z "$CLUSTER_NAME" ]]; then - log_error "Usage: gen-sa.sh [FILE_NAME]" - exit 1 -fi - -if [[ -z "$FILE_NAME" ]]; then - FILE_NAME=/tmp/kube.config -fi - -SA_TOKEN=virt-${CLUSTER_PREFIX}-${SA_NAME}-token -SA_CAR_NAME=virt-${CLUSTER_PREFIX}-${SA_NAME} - -USER_NAME=${SA_NAME} -CONTEXT_NAME=${CLUSTER_NAME}-${USER_NAME} - -if kubectl cluster-info > /dev/null 2>&1; then - log_success "Access to Kubernetes cluster exists." -else - log_error "No access to Kubernetes cluster or configuration issue." 
- exit 1 -fi - -sleep 2 -log_info "====" -log_info "Kubeconfig will be created successfully if you connected to k8s cluster via ssh tunnel or directly" -log_info "====" -sleep 2 - - -log_info "Apply SA, Secrets and ClusterAuthorizationRule" -kubectl apply -f -< /etc/ceph/ceph.conf - [global] - mon_host = $(sed 's/[a-z]=//g' /etc/rook/mon-endpoints) - EOF - - cat << EOF > /etc/ceph/ceph.client.admin.keyring - [$ROOK_CEPH_USERNAME] - key = $ROOK_CEPH_SECRET - EOF - env: - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - key: ceph-username - name: rook-ceph-mon - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - key: ceph-secret - name: rook-ceph-mon - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - - mountPath: /etc/rook - name: mon-endpoint-volume - containers: - - name: ceph-tools - command: - - sleep - - infinity - image: quay.io/ceph/ceph:v18.2.2 - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - tty: true - workingDir: /var/lib/ceph - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - - mountPath: /var/lib/ceph - name: homedir - securityContext: - runAsGroup: 167 - runAsNonRoot: true - runAsUser: 167 - volumes: - - name: mon-endpoint-volume - configMap: - defaultMode: 420 - items: - - key: data - path: mon-endpoints - name: rook-ceph-mon-endpoints - - name: ceph-config - emptyDir: {} - - name: homedir - emptyDir: {} diff --git a/test/dvp-over-dvp/storage/ceph/ceph-configure.sh b/test/dvp-over-dvp/storage/ceph/ceph-configure.sh deleted file mode 100644 index aad18a1bf5..0000000000 --- a/test/dvp-over-dvp/storage/ceph/ceph-configure.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2025 Flant JSC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -ceph_user_pool=ceph-rbd-pool-r2 -echo "Use user $ceph_user_pool" -echo "Set permissions for user $ceph_user_pool (mgr 'allow *' mon 'allow *' osd 'allow *' mds 'allow *')" -usr=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- \ - ceph auth get-or-create client.$ceph_user_pool mon 'allow *' mgr 'allow *' osd "allow *") -echo "Get fsid" -fsid=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph fsid) - -userKey="${usr#*key = }" -ceph_monitors_ip=$(kubectl -n d8-operator-ceph get svc | grep mon | awk '{print $3}') -monitors_yaml=$( - for monitor_ip in $ceph_monitors_ip; do - echo " - $monitor_ip:6789" - done -) - -# Verify we have monitors -if [ -z "$monitors_yaml" ]; then - echo "ERROR: No Ceph monitors found" - exit 1 -fi - -echo "Create CephClusterConnection" -kubectl apply -f - <> "${manifest}" ---- -apiVersion: storage.deckhouse.io/v1alpha1 -kind: LVMVolumeGroup -metadata: - name: vg-data-${node_name}-${dev_path} -spec: - actualVGNameOnTheNode: vg-thin-data - type: Local - local: - nodeName: ${dev_node} - blockDeviceSelector: - matchExpressions: - - key: kubernetes.io/metadata.name - operator: In - values: - - ${dev_name} - thinPools: - - name: thin-data - size: ${LVMVG_SIZE} - allocationLimit: 100% -EOF - -done - -kubectl apply -f "${manifest}" diff --git a/test/dvp-over-dvp/storage/sds-replicated/mc.yaml b/test/dvp-over-dvp/storage/sds-replicated/mc.yaml deleted file mode 100644 index b7d6abda99..0000000000 --- a/test/dvp-over-dvp/storage/sds-replicated/mc.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: sds-node-configurator -spec: - version: 1 - enabled: true ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: sds-replicated-volume -spec: - version: 1 - enabled: true ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: sds-node-configurator -spec: - imageTag: main - scanInterval: 15s ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: sds-replicated-volume -spec: - imageTag: main - scanInterval: 15s diff --git a/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh b/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh deleted file mode 100644 index 7d93443620..0000000000 --- a/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2025 Flant JSC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -manifest=sds-rsp-rsc.yaml -replicatedStoragePoolName=thin-data - -pools=$(kubectl get lvmvolumegroup -o json | jq '.items[] | {name: .metadata.name, thinPoolName: .spec.thinPools[0].name}' -rc) - -cat << EOF > "${manifest}" ---- -apiVersion: storage.deckhouse.io/v1alpha1 -kind: ReplicatedStoragePool -metadata: - name: $replicatedStoragePoolName -spec: - type: LVMThin - lvmVolumeGroups: -EOF - -for pool in ${pools}; do - vg_name=$(echo $pool | jq -r '.name'); - pool_node=$(echo $pool | jq -r '.thinPoolName'); - echo "${pool_node} ${vg_name}" -cat << EOF >> "${manifest}" - - name: ${vg_name} - thinPoolName: ${pool_node} -EOF -done - -cat << EOF >> "${manifest}" ---- -apiVersion: storage.deckhouse.io/v1alpha1 -kind: ReplicatedStorageClass -metadata: - name: nested-thin-r2 -spec: - replication: Availability - storagePool: $replicatedStoragePoolName - reclaimPolicy: Delete - volumeAccess: PreferablyLocal - topology: Ignored ---- -apiVersion: storage.deckhouse.io/v1alpha1 -kind: ReplicatedStorageClass -metadata: - name: nested-thin-r1 -spec: - replication: None - storagePool: $replicatedStoragePoolName - reclaimPolicy: Delete - volumeAccess: PreferablyLocal - topology: Ignored ---- -apiVersion: storage.deckhouse.io/v1alpha1 -kind: ReplicatedStorageClass -metadata: - name: nested-thin-r1-immediate -spec: - replication: None - storagePool: $replicatedStoragePoolName - reclaimPolicy: Delete - volumeAccess: Any - topology: Ignored -EOF - -kubectl apply -f ${manifest} - -DEFAULT_STORAGE_CLASS=nested-thin-r1 -kubectl patch mc global --type='json' -p='[{"op": "replace", "path": "/spec/settings/defaultClusterStorageClass", "value": "'"$DEFAULT_STORAGE_CLASS"'"}]' - -sleep 2 -echo "Showing Storage Classes" -kubectl get storageclass -echo " " diff --git a/test/dvp-over-dvp/tools/deckhouse-queue.sh b/test/dvp-over-dvp/tools/deckhouse-queue.sh deleted file mode 100644 index cada5c5a46..0000000000 --- a/test/dvp-over-dvp/tools/deckhouse-queue.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2025 Flant JSC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -CYAN='\033[0;36m' -NC='\033[0m' # No Color - -get_current_date() { - date +"%H:%M:%S %d-%m-%Y" -} - -get_timestamp() { - date +%s -} - -log_info() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "[$timestamp] ${BLUE}[INFO]${NC} $message" -} - -log_success() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "[$timestamp] ${GREEN}[SUCCESS]${NC} $message" -} - -log_warning() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "[$timestamp] ${YELLOW}[WARNING]${NC} $message" -} - -log_error() { - local message="$1" - local timestamp=$(get_current_date) - echo -e "[$timestamp] ${RED}[ERROR]${NC} $message" -} - -kubectl() { - /opt/deckhouse/bin/kubectl $@ - # sudo /opt/deckhouse/bin/kubectl $@ -} - -d8() { - /opt/deckhouse/bin/d8 $@ - # sudo /opt/deckhouse/bin/d8 $@ -} - - -d8_queue_main() { - echo "$( d8 p queue main | grep -Po '(?<=length )([0-9]+)' )" -} - -d8_queue_list() { - d8 p queue list | grep -Po '([0-9]+)(?= active)' -} - -d8_queue() { - local count=90 - # local main_queue_ready=false - local list_queue_ready=false - - for i in $(seq 1 $count) ; do - # if [ $(d8_queue_main) == "0" ]; then - # echo "main queue is clear" - # main_queue_ready=true - # else - # echo "Show main queue" - # d8 p queue main | head -n25 || echo "Failed to retrieve main queue" - # fi - - if [ $(d8_queue_list) == "0" ]; then - echo "list queue list is clear" - list_queue_ready=true - else - echo "Show queue list" - d8 p queue list | head -n25 || echo "Failed to retrieve queue" - fi - - if [ "$list_queue_ready" = true ]; then - # if [ "$main_queue_ready" = true ] && [ "$list_queue_ready" = true ]; then - break - fi - echo "Wait until queues are empty ${i}/${count}" - sleep 10 - done -} - -d8_ready() { - local ready=false - local count=60 - common_start_time=$(get_timestamp) - for i in $(seq 1 $count) ; do - start_time=$(get_timestamp) - if kubectl -n d8-system wait deploy/deckhouse --for condition=available --timeout=20s 2>/dev/null; then - ready=true - break - fi - end_time=$(get_timestamp) - difference=$((end_time - start_time)) - log_info "Wait until deckhouse is ready ${i}/${count} after ${difference}s" - if (( i % 5 == 0 )); then - kubectl -n d8-system get pods - d8 p queue list | head -n25 || echo "Failed to retrieve queue" - fi - done - - if [ "$ready" = true ]; then - log_success "Deckhouse is Ready!" 
- echo "Checking queues" - d8_queue - else - common_end_time=$(get_timestamp) - common_difference=$((common_end_time - common_start_time)) - common_formatted_difference=$(date -u +'%H:%M:%S' -d "@$common_difference") - log_error "Deckhouse is not ready after ${count} attempts and ${common_formatted_difference} time, check its queue for errors:" - d8 p queue main | head -n25 - exit 1 - fi -} - -start_time=$(get_timestamp) -log_info "Checking that deckhouse is ready" -d8_ready -end_time=$(get_timestamp) -difference=$((end_time - start_time)) -log_success "Deckhouse is ready after $(date -ud "@$difference" +'%H:%M:%S')" diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml index 8995cca2bf..03a24fcf99 100644 --- a/test/dvp-static-cluster/Taskfile.yaml +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -2,7 +2,6 @@ version: "3" - vars: NAMESPACE: sh: yq eval '.namespace' values.yaml @@ -51,11 +50,10 @@ tasks: - test -f "{{ .PASSWORD_FILE }}" - test -f "{{ .PASSWORD_HASH_FILE }}" - generate-helm-values: desc: Generate helm values {{ .DISCOVERED_VALUES_FILE }} deps: - - password-gen + - password-gen cmds: - touch {{ .DISCOVERED_VALUES_FILE }} - | @@ -73,7 +71,7 @@ tasks: - | export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" yq eval --inplace '.discovered.passwordHash = env(PASSWORD_HASH)' {{ .DISCOVERED_VALUES_FILE }} - + render-vm-ips: desc: Get VM IPs cmds: @@ -88,9 +86,9 @@ tasks: render-infra: desc: Preparation / Generate infra manifests deps: - - generate-helm-values + - generate-helm-values cmds: - - helm template static-dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} >> {{ .TMP_DIR }}/infra.yaml + - helm template static-dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} >> {{ .TMP_DIR }}/infra.yaml infra-deploy: deps: @@ -116,26 +114,25 @@ tasks: infra-undeploy: desc: Destroy infra aliases: - - uninstall + - uninstall cmds: - - kubectl delete -f {{ .TMP_DIR }}/infra.yaml || true + - kubectl delete -f {{ .TMP_DIR }}/infra.yaml --timeout 300s || true - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true - render-cluster-config: desc: Preparation / Generate cluster config (infra required) deps: - - ssh-gen - - generate-helm-values + - ssh-gen + - generate-helm-values cmds: - - helm template dvp-over-static-dvp-cluster-config ./charts/cluster-config -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml + - helm template dvp-over-static-dvp-cluster-config ./charts/cluster-config -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml render-cluster-manifests: desc: Preparation / Generate cluster config without cluster bootstrap configs (infra required) deps: - - render-cluster-config + - render-cluster-config cmds: - - yq 'select( (.apiVersion + "/" + .kind) != "deckhouse.io/v1/InitConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/ClusterConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/StaticClusterConfiguration" )' {{ .TMP_DIR }}/config.yaml > {{ .TMP_DIR }}/config-manifests.yaml + - yq 'select( (.apiVersion + "/" + .kind) != "deckhouse.io/v1/InitConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/ClusterConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/StaticClusterConfiguration" )' {{ .TMP_DIR }}/config.yaml > {{ .TMP_DIR }}/config-manifests.yaml render-all: desc: Generate all manifests @@ -144,16 +141,6 @@ tasks: - task render-cluster-config - task 
render-cluster-manifests - # update-cluster: - # desc: Update cluster - # deps: - # - render-cluster-manifests - # cmds: - # - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" {{ .TMP_DIR }}/config-manifests.yaml {{ .DEFAULT_USER }}@master-0:/tmp/config-manifests.yaml - # - task: __ssh-command - # vars: - # CMD: sudo /opt/deckhouse/bin/kubectl apply -f /tmp/config-manifests.yaml - dhctl-bootstrap: desc: Bootstrap DKP over DVP deps: @@ -207,8 +194,8 @@ tasks: silent: true cmds: - - echo "Connect to master task ssh-to-master" - - | + - echo "Connect to master task ssh-to-master" + - | echo "Host cluster master node: {{ .MASTER_NODE_NAME }}" echo "Namespace: {{ .NAMESPACE }}" echo "OS User: {{ .DEFAULT_USER }}" @@ -220,145 +207,6 @@ tasks: install: cmds: - - task: infra-deploy - - task: dhctl-bootstrap - - task: show-connection-info - - kill-dvp-resources: - cmds: - - kubectl -n {{ .NAMESPACE }} delete vm --all --force --grace-period=0 - - kubectl -n {{ .NAMESPACE }} delete vd --all --force --grace-period=0 - - ssh-to-master: - cmds: - - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@master-0 - - ssh-to-worker: - cmds: - - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true worker-0.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@worker-0 - - ssh-to-master-via-jumphost: - vars: - SSH_AGENT_SOCK: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.sock" - JUMPHOST_EXT_IP: - sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short - JUMPHOST_NODEPORT: - sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' - MASTER_NODE_IP: - sh: kubectl -n {{ .NAMESPACE }} get vm master-0 -o jsonpath="{.status.ipAddress}" - cmds: - # kill ssh-agent - - ps aux | grep '{{ .SSH_AGENT_SOCK }}' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true - # remove ssh-agent sock - - rm -rf {{ .SSH_AGENT_SOCK }} || true - # create temp ssh-agent - - eval $(ssh-agent -a {{ .SSH_AGENT_SOCK }}) - # add ssh key to agent - - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add {{ .SSH_PRIV_KEY_FILE }} - # check ssh-agent - - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add -l - # connect to master via jumphost - - | - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} \ - /usr/bin/ssh \ - -A -vv \ - -J user@{{ .JUMPHOST_EXT_IP }}:{{ .JUMPHOST_NODEPORT }} \ - -o StrictHostKeyChecking=no \ - -o UserKnownHostsFile=/dev/null \ - {{ .DEFAULT_USER }}@{{ .MASTER_NODE_IP}} - - ssh-to-master-via-ws: - vars: - SSH_AGENT_SOCK: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.sock" - DOMAIN: - sh: yq eval '.discovered.domain' {{ .DISCOVERED_VALUES_FILE }} - MASTER_NODE_IP: - sh: kubectl -n {{ .NAMESPACE }} get vm master-0 -o jsonpath="{.status.ipAddress}" - cmds: - # kill wstunnel - - ps aux | grep wstunnel | grep tcp://9999:127.0.0.1:2222 | awk '{print $2}' | xargs -r kill -9 || true - # start wstunnel - - wstunnel client -L tcp://9999:127.0.0.1:2222 wss://ws.{{ .NAMESPACE }}.{{ .DOMAIN }}:443 & - # kill ssh-agent - - ps aux | grep '{{ .SSH_AGENT_SOCK }}' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true - # remove ssh-agent sock - - rm -rf {{ .SSH_AGENT_SOCK }} || true - # create temp 
ssh-agent - - eval $(ssh-agent -a {{ .SSH_AGENT_SOCK }}) - # add ssh key to agent - - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add {{ .SSH_PRIV_KEY_FILE }} - # check ssh-agent - - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} ssh-add -l - # # remove known_hosts entry - - ssh-keygen -f "${HOME}/.ssh/known_hosts" -R "[127.0.0.1]:9999" - # connect to master via jumphost - - | - SSH_AUTH_SOCK={{ .SSH_AGENT_SOCK }} \ - /usr/bin/ssh \ - -A \ - -J user@127.0.0.1:9999 \ - -o StrictHostKeyChecking=no \ - -o UserKnownHostsFile=/dev/null \ - {{ .DEFAULT_USER }}@{{ .MASTER_NODE_IP}} - - clean: - cmds: - - task: infra-undeploy - - rm -rf "{{ .TMP_DIR }}" - - __ssh-command: - silent: true - internal: true - cmds: - - /usr/bin/ssh -t -i {{ .SSH_PRIV_KEY_FILE }} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@master-0 {{ .CMD }} - - kubectl: - desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" - cmds: - - task: __ssh-command - vars: - CMD: sudo /opt/deckhouse/bin/kubectl {{ .CLI_ARGS }} - - k9s: - desc: Run kubectl on master. Usage example "task kubectl -- get pods -A" - cmds: - - task: __ssh-command - vars: - CMD: TERM=xterm-256color sudo /usr/local/bin/k9s {{ .CLI_ARGS }} - - configure:storage:sds-lvg: - desc: Copy storage manifests to master - vars: - script: gen-lvg.sh - config: /tmp/sds-local-lvg.yaml - cmds: - - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} {{ .DEFAULT_USER }}@master-0:/tmp/ - - task: __ssh-command - vars: - CMD: sudo chmod +x /tmp/{{ .script }} - - task: __ssh-command - vars: - CMD: sudo /tmp/{{ .script }} - - task: __ssh-command - vars: - CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }} - - configure:storage:local-sc: - desc: Copy storage manifests to master - vars: - script: gen-sc.sh - config: /tmp/sds-local-sc - cmds: - - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" storage/sds-local-volume/{{ .script }} {{ .DEFAULT_USER }}@master-0:/tmp/ - - task: __ssh-command - vars: - CMD: sudo chmod +x /tmp/{{ .script }} - - task: __ssh-command - vars: - CMD: sudo /tmp/{{ .script }} - - task: __ssh-command - vars: - CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }}.yaml - - task: __ssh-command - vars: - CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }}-multi.yaml + - task: infra-deploy + - task: dhctl-bootstrap + - task: show-connection-info diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml index d0c490ad6f..b8a515800f 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml @@ -8,19 +8,15 @@ serviceSubnetCIDR: 10.99.0.0/16 kubernetesVersion: {{ .Values.deckhouse.kubernetesVersion | quote }} clusterDomain: "internal.{{ .Values.discovered.clusterDomain }}" defaultCRI: ContainerdV2 +{{- if .Values.deckhouse.proxy }} proxy: - httpProxy: "{{ .Values.deckhouse.httpProxy }}" - httpsProxy: "{{ .Values.deckhouse.httpProxy }}" - noProxy: - - "localhost" - - "127.0.0.1" - - "10.0.0.0/8" - - 
"172.16.0.0/12" - - "192.168.0.0/16" - - "10.112.0.0/16" - - "10.223.0.0/16" - - docker.io - - ".ubuntu.com" + httpProxy: "{{ .Values.deckhouse.proxy.httpProxy }}" + httpsProxy: "{{ .Values.deckhouse.proxy.httpsProxy }}" + noProxy: + {{- range .Values.deckhouse.proxy.noProxy }} + - "{{ . }}" + {{- end }} +{{- end }} --- apiVersion: deckhouse.io/v1 kind: InitConfiguration diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml index d4e82ce103..ea1bd9e0c6 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml @@ -1,4 +1,4 @@ -{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" "dashboard" "console" -}} +{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" "observability" "dashboard" "console" "loki" "log-shipper" -}} {{- range $modules }} --- diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml index 186a955850..ab53cb6446 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml @@ -36,10 +36,6 @@ metadata: name: descheduler spec: enabled: {{ if eq $totalNodes 1 }}false{{ else }}true{{ end }} - -################################################################## -## observability -################################################################## --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig @@ -50,7 +46,6 @@ spec: enabled: true settings: retentionDays: 7 - # storageClass: i-linstor-thin-r1 --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig @@ -81,31 +76,6 @@ metadata: name: node-local-dns spec: enabled: true ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: log-shipper -spec: - enabled: true ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: loki -spec: - settings: - # storageClass: i-linstor-thin-r1 - diskSizeGigabytes: 50 - retentionPeriodHours: 24 - storeSystemLogs: false - enabled: true - version: 1 - -################################################################## -## storage -################################################################## - --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig @@ -123,48 +93,6 @@ spec: imageTag: main rollback: false scanInterval: 10m0s - -{{ if or .Values.modules.sdsLocalVolumeEnabled .Values.modules.sdsReplicatedVolumeEnabled }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: sds-node-configurator -spec: - version: 1 - enabled: true ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: sds-node-configurator -spec: - imageTag: main - scanInterval: 15s -{{ end }} - -{{ if .Values.modules.sdsReplicatedVolumeEnabled }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: sds-replicated-volume -spec: - version: 1 - enabled: true ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: sds-replicated-volume -spec: - imageTag: main - scanInterval: 15s -{{ end }} - -################################################################## -## ingress 
-################################################################## --- apiVersion: deckhouse.io/v1 kind: IngressNginxController @@ -182,10 +110,6 @@ spec: tolerations: - effect: NoSchedule operator: Exists - -################################################################## -## rbac -################################################################## --- apiVersion: deckhouse.io/v1 kind: ClusterAuthorizationRule diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml index 46e5f71d8e..e389d9bf51 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml @@ -21,7 +21,6 @@ spec: settings: modules: publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" - # defaultClusterStorageClass: nfs --- apiVersion: deckhouse.io/v1alpha1 kind: ModuleConfig diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml deleted file mode 100644 index 138069b3f3..0000000000 --- a/test/dvp-static-cluster/charts/cluster-config/templates/nfs.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{- if eq .Values.deckhouse.bundle "Default" }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: csi-nfs -spec: - enabled: true - source: deckhouse - version: 1 ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: csi-nfs -spec: - imageTag: main - scanInterval: 10m ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: snapshot-controller -spec: - enabled: true - version: 1 ---- -apiVersion: storage.deckhouse.io/v1alpha1 -kind: NFSStorageClass -metadata: - name: nfs -spec: - connection: - host: {{ include "cluster-config.full-svc-address" (list $ "nfs-server") }} - share: / - nfsVersion: "4.1" - mountOptions: - mountMode: hard - timeout: 60 - retransmissions: 3 - reclaimPolicy: Delete - volumeBindingMode: WaitForFirstConsumer -{{ end -}} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml index b225504381..81b59e4edf 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml @@ -18,7 +18,7 @@ metadata: spec: weight: 98 nodeGroups: ["*"] - bundles: ["astra"] + bundles: ["astra", "ubuntu-lts", "debian"] content: | bb-sync-file /etc/modules-load.d/d8-dm-modules.conf - << "EOF" dm_snapshot diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml deleted file mode 100644 index fb63162c53..0000000000 --- a/test/dvp-static-cluster/charts/cluster-config/templates/virtualization.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{/* -{{- if eq .Values.deckhouse.bundle "Default" }} -{{- if .Values.modules.virtualizationEnabled }} ---- -apiVersion: deckhouse.io/v1alpha1 -kind: ModuleConfig -metadata: - name: virtualization -spec: - enabled: true - settings: - dvcr: - storage: - persistentVolumeClaim: - size: 10Gi - # storageClassName: linstor-thin-r1 - type: PersistentVolumeClaim - virtualMachineCIDRs: - - 192.168.10.0/24 - version: 1 -{{ if not .Values.cse }} ---- -apiVersion: deckhouse.io/v1alpha2 -kind: ModulePullOverride -metadata: - name: virtualization 
-spec: - imageTag: {{ .Values.virtualization.tag }} - scanInterval: 15s -{{ end -}} -{{ end -}} -{{ end -}} -*/}} \ No newline at end of file diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml deleted file mode 100644 index 12f1f11fb0..0000000000 --- a/test/dvp-static-cluster/charts/infra/templates/jump-host/ingress.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- define "jump-host.ws-fqdn" -}} -"ws.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" -{{- end }} ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: ws - namespace: {{ .Values.namespace }} -spec: - ingressClassName: nginx - rules: - - host: {{ include "jump-host.ws-fqdn" . }} - http: - paths: - - backend: - service: - name: jump-host - port: - number: 8080 - path: / - pathType: ImplementationSpecific - tls: - - hosts: - - {{ include "jump-host.ws-fqdn" . }} - secretName: ws-tls ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: ws-tls - namespace: {{ .Values.namespace }} -spec: - certificateOwnerRef: false - dnsNames: - - {{ include "jump-host.ws-fqdn" . }} - issuerRef: - kind: ClusterIssuer - name: letsencrypt - secretName: ws-tls diff --git a/test/dvp-static-cluster/storage/ceph/01-mc.yaml b/test/dvp-static-cluster/storage/ceph/01-mc.yaml index e9078890f7..1f711e905e 100644 --- a/test/dvp-static-cluster/storage/ceph/01-mc.yaml +++ b/test/dvp-static-cluster/storage/ceph/01-mc.yaml @@ -15,7 +15,6 @@ metadata: spec: enabled: true source: deckhouse - # source: deckhouse-prod version: 1 --- apiVersion: deckhouse.io/v1alpha2 @@ -24,13 +23,4 @@ metadata: name: csi-ceph spec: imageTag: main - scanInterval: 15s -# --- -# apiVersion: deckhouse.io/v1alpha1 -# kind: ModuleConfig -# metadata: -# name: snapshot-controller -# spec: -# enabled: true -# version: 1 -# source: deckhouse-prod + scanInterval: 10m From dcf3c99448f4156901a4a54f4ba4ea3630eab4a5 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 19 Dec 2025 16:25:32 +0300 Subject: [PATCH 11/71] static: add podSubnet and service CIDR in values yaml, add labels for jumphost Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 4 ++++ .github/workflows/e2e-reusable-pipeline.yml | 12 ++++++++++++ .../templates/cluster-config.yaml | 4 ++-- .../charts/infra/templates/jump-host/svc.yaml | 17 +++-------------- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 0ffea65124..6ac36ef0da 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -40,6 +40,8 @@ jobs: branch: main virtualization_tag: main deckhouse_tag: main + pod_subnet_cidr: 10.88.0.0/16 + service_subnet_cidr: 10.92.0.0/16 default_user: cloud go_version: "1.24.6" e2e_timeout: "3h" @@ -58,6 +60,8 @@ jobs: branch: main virtualization_tag: main deckhouse_tag: main + pod_subnet_cidr: 10.89.0.0/16 + service_subnet_cidr: 10.93.0.0/16 default_user: cloud go_version: "1.24.6" e2e_timeout: "3h" diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index d57437da06..d25aae359a 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -40,6 +40,16 @@ on: type: string default: "main" description: "Deckhouse tag" + pod_subnet_cidr: + required: false + type: string + default: "10.88.0.0/16" + description: "Pod subnet CIDR" + service_subnet_cidr: + required: false + 
type: string + default: "10.99.0.0/16" + description: "Service subnet CIDR" default_user: required: false type: string @@ -154,6 +164,8 @@ jobs: storageClass: ${defaultStorageClass} sa: dkp-sa deckhouse: + podSubnetCIDR: ${{ inputs.pod_subnet_cidr }} + serviceSubnetCIDR: ${{ inputs.service_subnet_cidr }} tag: ${{ env.DECKHOUSE_TAG }} kubernetesVersion: Automatic registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml index b8a515800f..4dd4fca6a7 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml @@ -2,9 +2,9 @@ apiVersion: deckhouse.io/v1 kind: ClusterConfiguration clusterType: Static -podSubnetCIDR: 10.88.0.0/16 +podSubnetCIDR: {{ .Values.deckhouse.podSubnetCIDR }} podSubnetNodeCIDRPrefix: '24' -serviceSubnetCIDR: 10.99.0.0/16 +serviceSubnetCIDR: {{ .Values.deckhouse.serviceSubnetCIDR }} kubernetesVersion: {{ .Values.deckhouse.kubernetesVersion | quote }} clusterDomain: "internal.{{ .Values.discovered.clusterDomain }}" defaultCRI: ContainerdV2 diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml index 4634ca7313..e795b2aa6f 100644 --- a/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml @@ -4,6 +4,8 @@ kind: Service metadata: name: jump-host namespace: {{ .Values.namespace }} + labels: + infra: jump-host spec: type: NodePort selector: @@ -13,17 +15,4 @@ spec: protocol: TCP port: 2222 targetPort: 2222 ---- -apiVersion: v1 -kind: Service -metadata: - name: jump-host-ws - namespace: {{ .Values.namespace }} -spec: - selector: - app: jump-host - ports: - - name: ws - protocol: TCP - port: 8080 - targetPort: 8080 + From 5d209ded5c60c32640f58a6dbc38df24ba862147 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 19 Dec 2025 17:27:30 +0300 Subject: [PATCH 12/71] try dh pr17193 Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 6ac36ef0da..0f1379fd39 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -30,6 +30,9 @@ defaults: run: shell: bash +env: + DECKHOUSE_TAG: pr17193 + jobs: e2e-ceph: name: E2E Pipeline (Ceph) @@ -39,7 +42,7 @@ jobs: nested_storageclass_name: nested-ceph-pool-r2-csi-rbd branch: main virtualization_tag: main - deckhouse_tag: main + deckhouse_tag: ${DECKHOUSE_TAG} pod_subnet_cidr: 10.88.0.0/16 service_subnet_cidr: 10.92.0.0/16 default_user: cloud @@ -59,7 +62,7 @@ jobs: nested_storageclass_name: nested-thin-r1 branch: main virtualization_tag: main - deckhouse_tag: main + deckhouse_tag: ${DECKHOUSE_TAG} pod_subnet_cidr: 10.89.0.0/16 service_subnet_cidr: 10.93.0.0/16 default_user: cloud From b0ad90675b5ab333cce00cd03d5d32df337be7f9 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 19 Dec 2025 17:35:29 +0300 Subject: [PATCH 13/71] upd info msg for virtualization module Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 7 +- .github/workflows/e2e-reusable-pipeline.yml | 98 ++++++++++++--------- test/dvp-static-cluster/Taskfile.yaml | 4 +- 3 files changed, 59 insertions(+), 50 deletions(-) diff 
--git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 0f1379fd39..f9821a779e 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -30,9 +30,6 @@ defaults: run: shell: bash -env: - DECKHOUSE_TAG: pr17193 - jobs: e2e-ceph: name: E2E Pipeline (Ceph) @@ -42,7 +39,7 @@ jobs: nested_storageclass_name: nested-ceph-pool-r2-csi-rbd branch: main virtualization_tag: main - deckhouse_tag: ${DECKHOUSE_TAG} + deckhouse_tag: pr17193 pod_subnet_cidr: 10.88.0.0/16 service_subnet_cidr: 10.92.0.0/16 default_user: cloud @@ -62,7 +59,7 @@ jobs: nested_storageclass_name: nested-thin-r1 branch: main virtualization_tag: main - deckhouse_tag: ${DECKHOUSE_TAG} + deckhouse_tag: pr17193 pod_subnet_cidr: 10.89.0.0/16 service_subnet_cidr: 10.93.0.0/16 default_user: cloud diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index d25aae359a..8711774081 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -505,7 +505,7 @@ jobs: kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" if (( i % 5 == 0 )); then - d8 p queue list | head -n25 || echo "No queues" + d8 s queue list | head -n25 || echo "No queues" fi sleep 10 done @@ -521,7 +521,7 @@ jobs: break fi echo "Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" - d8 p queue list | head -n25 || echo "No queues" + d8 s queue list | head -n25 || echo "No queues" sleep 10 done @@ -534,7 +534,7 @@ jobs: echo "Show cluster nodes" kubectl get nodes echo "Show deckhouse logs" - d8 p logs | tail -n 100 + d8 s logs | tail -n 100 exit 1 fi @@ -556,7 +556,7 @@ jobs: echo "[DEBUG] Get pods" kubectl -n d8-sds-replicated-volume get pods || true echo "[DEBUG] Show queue (first 25 lines)" - d8 p queue list | head -n 25 || echo "Failed to retrieve list queue" + d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" fi done @@ -574,7 +574,7 @@ jobs: working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/ceph run: | d8_queue_list() { - d8 p queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" + d8 s queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" } d8_queue() { @@ -587,7 +587,7 @@ jobs: list_queue_ready=true else echo "Show queue list" - d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + d8 s queue list | head -n25 || echo "Failed to retrieve list queue" fi if [[ "$list_queue_ready" = true ]]; then @@ -635,7 +635,7 @@ jobs: echo "Show all ns" kubectl get ns echo "=====" - d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + d8 s queue list | head -n25 || echo "Failed to retrieve list queue" fi sleep 10 done @@ -674,7 +674,7 @@ jobs: echo "[DEBUG] Show ceph in resource modules" kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" echo "[DEBUG] Show queue" - d8 p queue list | head -n 25 || echo "Failed to retrieve list queue" + d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" fi echo "[INFO] Wait until all necessary pods are ready ${i}/60" sleep 10 @@ -755,63 +755,75 @@ jobs: - name: Wait for Virtualization to be ready run: | d8_queue_list() { - d8 p queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" + d8 s queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" } d8_queue() { local count=90 - local list_queue_ready=false + 
local queue_ready=false for i in $(seq 1 $count) ; do if [ $(d8_queue_list) == "0" ]; then echo "Queue list is clear" - list_queue_ready=true + queue_ready=true else echo "Show queue list" - d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + d8 s queue list | head -n25 || echo "Failed to retrieve list queue" fi - if [ "$list_queue_ready" = true ]; then + if [ "$queue_ready" = true ]; then break fi - echo "====" - echo "Wait until queues are empty ${i}/${count}" - echo "====" - kubectl -n d8-virtualization get pods || echo "ns virtualization is not ready" - echo " " + + echo " " + echo "[INFO] Wait until queues are empty ${i}/${count}" if (( i % 5 == 0 )); then - d8 p logs | tail -n 100 - echo "=====" + d8 s logs | tail -n 100 + echo " " fi + sleep 10 + done + } + + virttualization_ready() { + local count=60 + local virtualization_status + for i in $(seq 1 $count) ; do + virtualization_status=$(kubectl get modules virtualization -o jsonpath='{.status.phase}') + if [ "$virtualization_status" == "Ready" ]; then + echo "Virtualization module is ready" + kubectl get modules virtualization + kubectl -n d8-virtualization get pods + echo "[SUCESS] Virtualization module is ready" + break + fi + echo " " + echo "[INFO] Waiting 10s for Virtualization module to be ready (attempt $i/$count)" + if (( i % 5 == 0 )); then + kubectl describe modules virtualization || echo "Module virtualization is not ready" + kubectl get ns d8-virtualization || echo "Namespace virtualization is not ready" + kubectl -n d8-virtualization get pods || echo "Pods in namespace virtualization is not ready" + fi sleep 10 done + echo "[ERROR] Virtualization module deploy failed" + echo "[DEBUG] Show describe virtualization module" + kubectl describe modules virtualization || true + echo "[DEBUG] Check namespace d8-virtualization" + kubectl get ns d8-virtualization || true + echo "[DEBUG] Check pods in namespace d8-virtualization" + kubectl -n d8-virtualization get pods || true + echo "[DEBUG] Show deckhouse logs" + d8 s logs | tail -n 100 + exit 1 } - echo "Waiting for Virtualization module to be ready" - if [ "$(kubectl get mc virtualization -o jsonpath='{.spec.enabled}')" != "true" ]; then - echo "Virtualization module is not enabled" - echo "Enabling virtualization module" - kubectl patch mc virtualization -p '{"spec":{"enabled": true}}' --type merge - fi + echo " " + echo "[INFO] Waiting for Virtualization module to be ready" d8_queue - - for i in {1..60}; do - virtualization_status=$(kubectl get modules virtualization -o jsonpath='{.status.phase}') - if [ "$virtualization_status" == "Ready" ]; then - echo "Virtualization module is ready" - kubectl get modules virtualization - kubectl -n d8-virtualization get pods - break - fi - echo "Waiting 10s for Virtualization module to be ready" - kubectl get modules virtualization - if (( i % 5 == 0 )); then - kubectl get ns d8-virtualization || echo "ns virtualization is not ready" - kubectl -n d8-virtualization get pods || echo "ns virtualization is not ready" - fi - sleep 10 - done + + virttualization_ready e2e-test: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml index 03a24fcf99..4b9a4d9204 100644 --- a/test/dvp-static-cluster/Taskfile.yaml +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -152,7 +152,7 @@ tasks: start_time: sh: date +%s JUMPHOST_EXT_IP: - sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com 
myip.opendns.com +short + sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short JUMPHOST_NODEPORT: sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' MASTER_NODE_IP: @@ -186,7 +186,7 @@ tasks: PASSWORD: sh: cat {{ .PASSWORD_FILE }} JUMPHOST_EXT_IP: - sh: kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short JUMPHOST_NODEPORT: sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' MASTER_NODE_NAME: From d1e5a90597f171e1886ef8110771c524656b8ec1 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 19 Dec 2025 19:28:50 +0300 Subject: [PATCH 14/71] static: refactor wf Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 12 ------- .github/workflows/e2e-reusable-pipeline.yml | 38 +++++---------------- 2 files changed, 8 insertions(+), 42 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index f9821a779e..95a05c1e5b 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -170,9 +170,6 @@ jobs: DATE=$(date +"%Y-%m-%d") COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n\n" - # Parse summaries from job outputs - # ceph_summary=${{ toJSON(needs.e2e-ceph.outputs.e2e-summary) }} - # replicated_summary=${{ toJSON(needs.e2e-replicated.outputs.e2e-summary) }} # Save to json files cat > /tmp/ceph.json << 'EOF' ${{ needs.e2e-ceph.outputs.e2e-summary }} @@ -190,15 +187,6 @@ jobs: parse_summary "$(cat /tmp/replicated.json)" "replicated" fi - # Parse each summary - # if [ -n "$ceph_summary" ] && [ "$ceph_summary" != "null" ]; then - # parse_summary "$ceph_summary" "ceph" - # fi - - # if [ -n "$replicated_summary" ] && [ "$replicated_summary" != "null" ]; then - # parse_summary "$replicated_summary" "replicated" - # fi - COMBINED_SUMMARY+="${markdown_table}\n" echo -e "$COMBINED_SUMMARY" diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 8711774081..9ccfc43ef2 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -828,7 +828,7 @@ jobs: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 outputs: - report-summary: ${{ steps.report.outputs.summary }} + report-summary: ${{ steps.e2e-summary.outputs.summary }} needs: - bootstrap - configure-storage @@ -879,9 +879,10 @@ jobs: kubectl get vmclass/generic -o json | jq 'del(.status) | del(.metadata) | .metadata = {"name":"generic-for-e2e","annotations":{"virtualmachineclass.virtualization.deckhouse.io/is-default-class":"true"}} ' | kubectl create -f - - name: Run E2E - id: e2e-tests + id: e2e-summary env: TIMEOUT: ${{ inputs.e2e_timeout }} + CSI: ${{ inputs.storage_type }} working-directory: ./test/e2e/ run: | if [[ "${{ inputs.storage_type }}" == "replicated" ]]; then @@ -889,41 +890,18 @@ jobs: fi STORAGE_CLASS_NAME=${{ inputs.nested_storageclass_name }} FOCUS="VirtualMachineConfiguration" task run:ci -v LABELS="Slow" - - name: Save results - working-directory: ./test/e2e/ - id: report - env: - input_storage_type: ${{ inputs.storage_type }} - if: always() - run: | - if [ -z "$SUMMARY" ]; then - SUMMARY=$(jq -n \ - --arg csi "$input_storage_type" \ - --arg date "$DATE" \ - --arg startTime 
"$START_TIME" \ - --arg branch "$GITHUB_REF_NAME" \ - --arg status ":question: UNKNOWN" \ - --arg link "$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID/" \ - '{ - CSI: $csi, - Date: $date, - StartTime: $startTime, - Branch: $branch, - Status: $status, - Link: $link - }' - ) - fi - echo $SUMMARY | jq echo "summary=$SUMMARY" >> $GITHUB_OUTPUT - echo $SUMMARY > "e2e_summary_${{ inputs.storage_type }}_$DATE.json" + summary_file_name="e2e_summary_${{ inputs.storage_type }}_$DATE.json" + echo "report_file_name=${summary_file_name}" >> $GITHUB_OUTPUT + + echo $SUMMARY > "${summary_file_name}" - name: Upload summary test results uses: actions/upload-artifact@v4 id: e2e-summary-artifact if: always() with: - name: e2e_summary_${{ inputs.storage_type }}_${{ env.DATE }} + name: ${{ steps.e2e-summary.outputs.report_file_name }} path: test/e2e/e2e_summary_${{ inputs.storage_type }}.json if-no-files-found: ignore From 7d505c7bfbc64af30326c1588d2015a1c7acd4d6 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 19 Dec 2025 20:29:45 +0300 Subject: [PATCH 15/71] static: fix virt cert Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 9ccfc43ef2..4f1de1ee0d 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -727,6 +727,10 @@ jobs: spec: enabled: true settings: + https: + certManager: + clusterIssuerName: selfsigned + mode: CertManager dvcr: storage: persistentVolumeClaim: @@ -807,6 +811,7 @@ jobs: fi sleep 10 done + echo "[ERROR] Virtualization module deploy failed" echo "[DEBUG] Show describe virtualization module" kubectl describe modules virtualization || true From 95afc30f2930b8a2d4651d25a2c9a04b7f214169 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 19 Dec 2025 23:08:09 +0300 Subject: [PATCH 16/71] static: fix msg messages, fix conditions,refactor dbg messages Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 69 ++++++++++--------- .../tools/deckhouse-queue.sh | 27 ++------ 2 files changed, 41 insertions(+), 55 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 4f1de1ee0d..2febaa1cd6 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -579,23 +579,17 @@ jobs: d8_queue() { local count=90 - local list_queue_ready=false for i in $(seq 1 $count) ; do if [[ "$(d8_queue_list)" == "0" ]]; then - echo "Queue list is clear" - list_queue_ready=true + echo "[SUCESS] Queue is clear" + break else - echo "Show queue list" + echo "[INFO] Show queue list" d8 s queue list | head -n25 || echo "Failed to retrieve list queue" fi - if [[ "$list_queue_ready" = true ]]; then - break - fi - echo "====" - echo "Wait until queues are empty ${i}/${count}" - echo "====" + echo "[INFO] Wait until queues are empty ${i}/${count}" kubectl get ns | grep sds || echo "ns sds is not ready" echo " " sleep 10 @@ -748,13 +742,13 @@ jobs: name: virtualization spec: imageTag: ${{ env.VIRTUALIZATION_TAG }} - scanInterval: 15s + scanInterval: 10m EOF - echo "get mc virtualization" + echo "[INFO] Show module config virtualization info" kubectl get mc virtualization - echo "get mpo virtualization" + echo "[INFO] Show ModulePullOverride virtualization info" kubectl get mpo virtualization - name: Wait for Virtualization to be 
ready run: | @@ -764,50 +758,51 @@ jobs: d8_queue() { local count=90 - local queue_ready=false for i in $(seq 1 $count) ; do if [ $(d8_queue_list) == "0" ]; then - echo "Queue list is clear" - queue_ready=true + echo "[INFO] Queue list is clear" + break else - echo "Show queue list" + echo "[INFO] Show queue list" d8 s queue list | head -n25 || echo "Failed to retrieve list queue" fi - if [ "$queue_ready" = true ]; then - break - fi - - echo " " echo "[INFO] Wait until queues are empty ${i}/${count}" + if (( i % 5 == 0 )); then - d8 s logs | tail -n 100 - echo " " + echo " " + d8 s logs | tail -n 100 + echo " " fi sleep 10 done } virttualization_ready() { - local count=60 + local count=90 local virtualization_status for i in $(seq 1 $count) ; do virtualization_status=$(kubectl get modules virtualization -o jsonpath='{.status.phase}') if [ "$virtualization_status" == "Ready" ]; then - echo "Virtualization module is ready" + echo "[SUCESS] Virtualization module is ready" kubectl get modules virtualization kubectl -n d8-virtualization get pods - echo "[SUCESS] Virtualization module is ready" break fi - echo " " + echo "[INFO] Waiting 10s for Virtualization module to be ready (attempt $i/$count)" + if (( i % 5 == 0 )); then - kubectl describe modules virtualization || echo "Module virtualization is not ready" - kubectl get ns d8-virtualization || echo "Namespace virtualization is not ready" - kubectl -n d8-virtualization get pods || echo "Pods in namespace virtualization is not ready" + echo " " + echo "[DEBUG] Show additional info" + echo " " + kubectl get ns d8-virtualization || echo "[WARNING] Namespace virtualization is not ready" + echo " " + kubectl -n d8-virtualization get pods || echo "[WARNING] Pods in namespace virtualization is not ready" + kubectl get pvc -n d8-virtualization || echo "[WARNING] PVC in namespace virtualization is not ready" + echo " " fi sleep 10 done @@ -815,10 +810,18 @@ jobs: echo "[ERROR] Virtualization module deploy failed" echo "[DEBUG] Show describe virtualization module" kubectl describe modules virtualization || true - echo "[DEBUG] Check namespace d8-virtualization" + echo "[DEBUG] Show namespace d8-virtualization" kubectl get ns d8-virtualization || true - echo "[DEBUG] Check pods in namespace d8-virtualization" + echo "[DEBUG] Show pods in namespace d8-virtualization" kubectl -n d8-virtualization get pods || true + echo "[DEBUG] Show dvcr info" + kubectl -n d8-virtualization get po -l app=dvcr -o yaml || true + echo " " + kubectl -n d8-virtualization describe po -l app=dvcr || true + echo "[DEBUG] Show pvc in namespace d8-virtualization" + kubectl get pvc -n d8-virtualization || true + echo "[DEBUG] Show storageclasses" + kubectl get storageclasses || true echo "[DEBUG] Show deckhouse logs" d8 s logs | tail -n 100 exit 1 diff --git a/test/dvp-static-cluster/tools/deckhouse-queue.sh b/test/dvp-static-cluster/tools/deckhouse-queue.sh index cada5c5a46..6492aeac0e 100644 --- a/test/dvp-static-cluster/tools/deckhouse-queue.sh +++ b/test/dvp-static-cluster/tools/deckhouse-queue.sh @@ -56,12 +56,10 @@ log_error() { kubectl() { /opt/deckhouse/bin/kubectl $@ - # sudo /opt/deckhouse/bin/kubectl $@ } d8() { /opt/deckhouse/bin/d8 $@ - # sudo /opt/deckhouse/bin/d8 $@ } @@ -75,31 +73,16 @@ d8_queue_list() { d8_queue() { local count=90 - # local main_queue_ready=false - local list_queue_ready=false for i in $(seq 1 $count) ; do - # if [ $(d8_queue_main) == "0" ]; then - # echo "main queue is clear" - # main_queue_ready=true - # else - # echo "Show main queue" - # d8 p 
queue main | head -n25 || echo "Failed to retrieve main queue" - # fi - if [ $(d8_queue_list) == "0" ]; then - echo "list queue list is clear" - list_queue_ready=true + log_success "Queue is clear" + break else - echo "Show queue list" + log_info "Show queue first 25 lines" d8 p queue list | head -n25 || echo "Failed to retrieve queue" fi - - if [ "$list_queue_ready" = true ]; then - # if [ "$main_queue_ready" = true ] && [ "$list_queue_ready" = true ]; then - break - fi - echo "Wait until queues are empty ${i}/${count}" + log_info "Wait until queues are empty ${i}/${count}" sleep 10 done } @@ -125,7 +108,7 @@ d8_ready() { if [ "$ready" = true ]; then log_success "Deckhouse is Ready!" - echo "Checking queues" + log_info "Checking queues" d8_queue else common_end_time=$(get_timestamp) From 8bfb869ed7c8a04d1b2fc07c75398352ae10321d Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sat, 20 Dec 2025 00:21:46 +0300 Subject: [PATCH 17/71] static: add log category, emoji in vort install Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 113 +++++++++++--------- 1 file changed, 61 insertions(+), 52 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 2febaa1cd6..578717c3df 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -468,17 +468,17 @@ jobs: ls -la ~/.kube chmod 600 ~/.kube/config - echo "kubectl get nodes" + echo "[INFO] Show nodes in cluster" kubectl config use-context nested-e2e-nested-sa - # kubectl get nodes + # some times kubectl get nodes returns error, so we need to retry for i in {1..3}; do echo "Attempt $i/3..." if (kubectl get nodes); then - echo "Successfully retrieved nodes." + echo "[SUCCESS] Successfully retrieved nodes." break else - echo "Retrying in 5 seconds..." + echo "[INFO] Retrying in 5 seconds..." 
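          # (aside, not part of this patch) the retry-in-place loop above could be
          # factored into a small helper so other steps that hit the same flaky first
          # "kubectl get nodes" can reuse it; a minimal sketch, assuming plain bash only:
          #
          #   retry() {
          #     local attempts=$1; shift
          #     local i
          #     for ((i = 1; i <= attempts; i++)); do
          #       "$@" && return 0
          #       echo "[INFO] Attempt ${i}/${attempts} failed, retrying in 5 seconds..."
          #       sleep 5
          #     done
          #     return 1
          #   }
          #
          #   retry 3 kubectl get nodes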
sleep 5 fi done @@ -488,20 +488,20 @@ jobs: working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/sds-replicated run: | kubectl apply -f mc.yaml - echo "Wait for sds-node-configurator" + echo "[INFO] Wait for sds-node-configurator" kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-node-configurator --timeout=300s for i in {1..60}; do sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") if [[ "${sds_replicated_volume_status}" = "Active" ]]; then - echo "Namespaces sds-replicated-volume are Active" + echo "[SUCCESS] Namespaces sds-replicated-volume are Active" kubectl -n d8-sds-replicated-volume get pods break fi - echo "Waiting 10s for sds-replicated-volume to be ready" - echo "get ns" + echo "[INFO] Waiting 10s for sds-replicated-volume to be ready" + echo "[INFO] Show namespaces sds-replicated-volume" kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" if (( i % 5 == 0 )); then @@ -510,7 +510,7 @@ jobs: sleep 10 done - echo "Wait bd" + echo "[INFO] Wait BlockDevice are ready" workers=$(kubectl get nodes -o name | grep worker | wc -l) bdexists=false count=60 @@ -520,43 +520,45 @@ jobs: bdexists=true break fi - echo "Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" + echo "[INFO] Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" d8 s queue list | head -n25 || echo "No queues" sleep 10 done if [ $bdexists = false ]; then - echo "Blockdevices is not 3" - echo "Show blockdevice" + echo "[ERROR] Blockdevices is not 3" + echo "[DEBUG] Show blockdevice" kubectl get blockdevice - echo "Show sds namespaces" + echo "[DEBUG] Show sds namespaces" kubectl get ns | grep sds || echo "ns sds is not found" - echo "Show cluster nodes" + echo "[DEBUG] Show cluster nodes" kubectl get nodes - echo "Show deckhouse logs" + echo "[DEBUG] Show deckhouse logs" d8 s logs | tail -n 100 + echo " " exit 1 fi - echo "Wait pods and webhooks sds-replicated pods" + echo "[INFO] Wait pods and webhooks sds-replicated pods" for i in {1..60}; do - echo "Check sds-replicated pods, linstor-node csi-node webhooks" + echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" linstor_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep linstor-node | grep -c Running || echo 0) csi_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep csi-node | grep -c Running || echo 0) webhooks=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep webhooks | grep -c Running || echo 0) - echo "check if sds-replicated pods are ready" + echo "[INFO] Check if sds-replicated pods are ready" if [[ "${linstor_node}" -ge "${workers}" ]] && [[ "${csi_node}" -ge "${workers}" ]] && [[ "${webhooks}" -ge "1" ]]; then - echo "sds-replicated-volume is ready" + echo "[SUCCESS] sds-replicated-volume is ready" break fi - echo "Waiting 10s for sds-replicated-volume to be ready" + echo "[INFO] Waiting 10s for sds-replicated-volume to be ready" if (( i % 5 == 0 )); then echo "[DEBUG] Get pods" kubectl -n d8-sds-replicated-volume get pods || true echo "[DEBUG] Show queue (first 25 lines)" d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" + echo " " fi done @@ -566,15 +568,15 @@ jobs: chmod +x rsc-gen.sh ./rsc-gen.sh - echo "Enshure that nested storageclasses are created" + echo "[INFO] Enshure that nested storageclasses are created" kubectl get sc | grep nested || echo "No nested storageclasses" - echo 
"Done" + echo "[SUCCESS] Done" - name: Configure ceph storage if: ${{ inputs.storage_type == 'ceph' }} working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/ceph run: | d8_queue_list() { - d8 s queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" + d8 s queue list | grep -Po '([0-9]+)(?= active)' || echo "[WARNING] Failed to retrieve list queue" } d8_queue() { @@ -582,11 +584,11 @@ jobs: for i in $(seq 1 $count) ; do if [[ "$(d8_queue_list)" == "0" ]]; then - echo "[SUCESS] Queue is clear" + echo "[SUCCESS] Queue is clear" break else echo "[INFO] Show queue list" - d8 s queue list | head -n25 || echo "Failed to retrieve list queue" + d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" fi echo "[INFO] Wait until queues are empty ${i}/${count}" @@ -605,10 +607,10 @@ jobs: kubectl wait --for=jsonpath='{.status.phase}' modulesource deckhouse-prod --timeout=30s kubectl get modulesources - echo "Create ceph operator and csi module config" + echo "[INFO] Create ceph operator and csi module config" kubectl apply -f 01-mc.yaml - echo "Wait while queues are empty" + echo "[INFO] Wait while queues are empty" d8_queue echo "Start wait for ceph operator and csi" @@ -617,56 +619,56 @@ jobs: csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") if [[ "${ceph_operator_status}" = "Active" ]] && [[ "${csi_ceph_status}" = "Ready" ]]; then - echo "Namespaces operator-ceph and csi are Active" + echo "[SUCCESS] Namespaces operator-ceph and csi are Active" break fi echo "Waiting 10s for ceph operator and csi namespaces to be ready" - echo "get ns" - kubectl get ns | grep ceph || echo "Namespaces operator-ceph and csi are not ready" + echo "[INFO] Get namespace" + kubectl get namespace | grep ceph || echo "[WARNING] Namespaces operator-ceph and csi are not ready" if (( i % 5 == 0 )); then - echo "Show all ns" - kubectl get ns - echo "=====" - d8 s queue list | head -n25 || echo "Failed to retrieve list queue" + echo "[DEBUG] Show all namespaces" + kubectl get namespace + echo " " + d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" fi sleep 10 done - echo "Create sa" + echo "[INFO] Create ServiceAccounts" kubectl apply -f 02-sa.yaml - echo "Create cm (patch existing for configure rbd support)" + echo "[INFO] Create ConfigMap (patch existing for configure rbd support)" kubectl apply -f 03-cm.yaml - echo "Create cluster" + echo "[INFO] Create Cluster" kubectl apply -f 04-cluster.yaml - echo "get pod in d8-operator-ceph" + echo "[INFO] Get pod in d8-operator-ceph" kubectl -n d8-operator-ceph get po - echo "Wait for ceph operator" + echo "[INFO] Wait for ceph operator" for i in {1..60}; do - echo "Check ceph pods, mon mgr osd" + echo "[INFO] Check ceph pods, mon mgr osd" ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) - echo "check if ceph pods are ready" + echo "[INFO] check if ceph pods are ready" if [[ "${ceph_mgr}" -ge "2" ]] && [[ "${ceph_mon}" -ge "3" ]] && [[ "${ceph_osd}" -ge "3" ]]; then echo "Ceph cluster is ready" break fi - echo "Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" - echo "Waiting 10s for ceph operator to be ready" + echo "[WARNING] Not all pods are ready, 
ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" + echo "[INFO] Waiting 10s for ceph operator to be ready" kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" if (( i % 5 == 0 )); then - echo "[DEBUG] Show ceph ns" + echo "[DEBUG] Show ceph namespace" kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" - echo "[DEBUG] Show mc ceph" + echo "[DEBUG] Show ModuleConfig ceph" kubectl get mc | grep ceph || echo "Failed to retrieve mc" echo "[DEBUG] Show ceph in resource modules" - kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" + kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" echo "[DEBUG] Show queue" d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" fi @@ -786,7 +788,7 @@ jobs: for i in $(seq 1 $count) ; do virtualization_status=$(kubectl get modules virtualization -o jsonpath='{.status.phase}') if [ "$virtualization_status" == "Ready" ]; then - echo "[SUCESS] Virtualization module is ready" + echo "[SUCCESS] Virtualization module is ready" kubectl get modules virtualization kubectl -n d8-virtualization get pods break @@ -797,7 +799,6 @@ jobs: if (( i % 5 == 0 )); then echo " " echo "[DEBUG] Show additional info" - echo " " kubectl get ns d8-virtualization || echo "[WARNING] Namespace virtualization is not ready" echo " " kubectl -n d8-virtualization get pods || echo "[WARNING] Pods in namespace virtualization is not ready" @@ -814,16 +815,24 @@ jobs: kubectl get ns d8-virtualization || true echo "[DEBUG] Show pods in namespace d8-virtualization" kubectl -n d8-virtualization get pods || true - echo "[DEBUG] Show dvcr info" - kubectl -n d8-virtualization get po -l app=dvcr -o yaml || true - echo " " - kubectl -n d8-virtualization describe po -l app=dvcr || true + echo "[DEBUG] Show dvcr pod yaml" + echo "::group::📦 dvcr pod yaml" + kubectl -n d8-virtualization get pod -l app=dvcr -o yaml || true + echo "::endgroup::" + echo "[DEBUG] Show dvcr pod describe" + echo "::group::📦 dvcr pod describe" + kubectl -n d8-virtualization describe pod -l app=dvcr || true + echo "::endgroup::" echo "[DEBUG] Show pvc in namespace d8-virtualization" kubectl get pvc -n d8-virtualization || true echo "[DEBUG] Show storageclasses" kubectl get storageclasses || true + echo "[DEBUG] Show queue (first 25 lines)" + d8 s queue list | head -n 25 || echo "[WARNING] Failed to retrieve list queue" echo "[DEBUG] Show deckhouse logs" + echo "::group::📝 deckhouse logs" d8 s logs | tail -n 100 + echo "::endgroup::" exit 1 } From ffcc16ccb101b7c77da46903e27f2e3f740db6f3 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sat, 20 Dec 2025 00:32:59 +0300 Subject: [PATCH 18/71] static: rename gen-sa to gen-kubeconfig, move to tools Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 77 +++++-------------- .../gen-sa.sh => tools/gen-kubeconfig.sh} | 0 2 files changed, 21 insertions(+), 56 deletions(-) rename test/dvp-static-cluster/{nested-sa-config/gen-sa.sh => tools/gen-kubeconfig.sh} (100%) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 578717c3df..a2ca6e225d 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -308,70 +308,34 @@ jobs: -c "$cmd" } - echo "Copy script for generating kubeconfig in nested cluster" - echo "Copy nested-sa-config/gen-sa.sh to master" - d8vscp "./nested-sa-config/gen-sa.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/gen-sa.sh" + 
echo "[INFO] Copy script for generating kubeconfig in nested cluster" + echo "[INFO] Copy tools/gen-kubeconfig.sh to master" + d8vscp "./tools/gen-kubeconfig.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/gen-kubeconfig.sh" echo "" d8vscp "./tools/deckhouse-queue.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/deckhouse-queue.sh" echo "" - echo "Set file exec permissions" - d8vssh 'chmod +x /tmp/{gen-sa.sh,deckhouse-queue.sh}' + echo "[INFO] Set file exec permissions" + d8vssh 'chmod +x /tmp/{gen-kubeconfig.sh,deckhouse-queue.sh}' d8vssh 'ls -la /tmp/' - echo "Check d8 queue" + echo "[INFO] Check d8 queue in nested cluster" d8vssh 'sudo /tmp/deckhouse-queue.sh' - # d8 v ssh -i ./tmp/ssh/cloud \ - # --local-ssh=true \ - # --local-ssh-opts="-o StrictHostKeyChecking=no" \ - # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - # ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ - # -c 'chmod +x /tmp/{gen-sa.sh,deckhouse-queue.sh}' - # echo "" - - # d8 v ssh -i ./tmp/ssh/cloud \ - # --local-ssh=true \ - # --local-ssh-opts="-o StrictHostKeyChecking=no" \ - # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - # ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ - # -c 'ls -la /tmp/' - # echo "===" - - # echo "Check d8 queue" - # d8 v ssh -i ./tmp/ssh/cloud \ - # --local-ssh=true \ - # --local-ssh-opts="-o StrictHostKeyChecking=no" \ - # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - # ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ - # -c 'sudo /tmp/deckhouse-queue.sh' - - echo "Generate kube conf in nested cluster" - echo "run nested-sa-config/gen-sa.sh" - - # "Usage: gen-sa.sh [FILE_NAME]" - echo "===" - # d8 v ssh -i ./tmp/ssh/cloud \ - # --local-ssh=true \ - # --local-ssh-opts="-o StrictHostKeyChecking=no" \ - # --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ - # ${DEFAULT_USER}@${nested_master}.$NAMESPACE \ - # -c "sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}" - - d8vssh "sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}" - - echo "'sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}' - done" + echo "[INFO] Generate kube conf in nested cluster" + echo "[INFO] Run gen-kubeconfig.sh in nested cluster" + d8vssh "sudo /tmp/gen-kubeconfig.sh nested-sa nested nested-e2e /${kubeConfigPath}" echo "" - echo "Copy kubeconfig to runner" - echo "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath} ./${kubeConfigPath}" + echo "[INFO] Copy kubeconfig to runner" + echo "[INFO] ${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath} ./${kubeConfigPath}" d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath}" "./${kubeConfigPath}" - echo "=== Set rights for kubeconfig ===" - echo "sudo chown 1001:1001 ${kubeConfigPath}" + echo "[INFO] Set rights for kubeconfig" + echo "[INFO] sudo chown 1001:1001 ${kubeConfigPath}" sudo chown 1001:1001 ${kubeConfigPath} - echo "rights - done" + echo " " - echo "Kubeconf to github output" + echo "[INFO] Kubeconf to github output" CONFIG=$(cat ${kubeConfigPath} | base64 -w 0) CONFIG=$(echo $CONFIG | base64 -w 0) echo "config=$CONFIG" >> $GITHUB_OUTPUT @@ -461,11 +425,12 @@ jobs: - name: Check kubeconfig run: | mkdir -p ~/.kube - echo "Configure kube config" + echo "[INFO] Configure kubeconfig for nested cluster" echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config - echo "Show paths and files content" + echo "[INFO] Show paths and files content" ls -la ~/.kube + echo "[INFO] Set permissions for kubeconfig" chmod 
600 ~/.kube/config echo "[INFO] Show nodes in cluster" @@ -569,7 +534,7 @@ jobs: ./rsc-gen.sh echo "[INFO] Enshure that nested storageclasses are created" - kubectl get sc | grep nested || echo "No nested storageclasses" + kubectl get storageclass | grep nested || echo "[WARNING] No nested storageclasses" echo "[SUCCESS] Done" - name: Configure ceph storage if: ${{ inputs.storage_type == 'ceph' }} @@ -602,7 +567,7 @@ jobs: yq e '.spec.registry.dockerCfg = env(registry)' -i 00-ms.yaml unset registry - echo "Create prod module source" + echo "[INFO] Create prod module source" kubectl apply -f 00-ms.yaml kubectl wait --for=jsonpath='{.status.phase}' modulesource deckhouse-prod --timeout=30s kubectl get modulesources @@ -623,7 +588,7 @@ jobs: break fi - echo "Waiting 10s for ceph operator and csi namespaces to be ready" + echo "[INFO] Waiting 10s for ceph operator and csi namespaces to be ready" echo "[INFO] Get namespace" kubectl get namespace | grep ceph || echo "[WARNING] Namespaces operator-ceph and csi are not ready" diff --git a/test/dvp-static-cluster/nested-sa-config/gen-sa.sh b/test/dvp-static-cluster/tools/gen-kubeconfig.sh similarity index 100% rename from test/dvp-static-cluster/nested-sa-config/gen-sa.sh rename to test/dvp-static-cluster/tools/gen-kubeconfig.sh From a9df5654d238fe534228363159b47882566e1d1c Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sat, 20 Dec 2025 11:45:39 +0300 Subject: [PATCH 19/71] static: comment https for virt, add dbg Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index a2ca6e225d..84891cc49b 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -688,10 +688,10 @@ jobs: spec: enabled: true settings: - https: - certManager: - clusterIssuerName: selfsigned - mode: CertManager + # https: + # certManager: + # clusterIssuerName: selfsigned + # mode: CertManager dvcr: storage: persistentVolumeClaim: @@ -775,7 +775,9 @@ jobs: echo "[ERROR] Virtualization module deploy failed" echo "[DEBUG] Show describe virtualization module" + echo "::group::📦 describe virtualization module" kubectl describe modules virtualization || true + echo "::endgroup::" echo "[DEBUG] Show namespace d8-virtualization" kubectl get ns d8-virtualization || true echo "[DEBUG] Show pods in namespace d8-virtualization" From 9deeb3ad5f4a482dc21bc970fa14d3f098032bf0 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sat, 20 Dec 2025 11:51:09 +0300 Subject: [PATCH 20/71] static: add label deploy jump-his, fix d8queue Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 4 ++-- .../charts/infra/templates/jump-host/deploy.yaml | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 84891cc49b..0dc6a72f0c 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -557,7 +557,7 @@ jobs: fi echo "[INFO] Wait until queues are empty ${i}/${count}" - kubectl get ns | grep sds || echo "ns sds is not ready" + kubectl get ns | grep sds || echo "Namespaces sds not found" echo " " sleep 10 done @@ -728,7 +728,7 @@ jobs: for i in $(seq 1 $count) ; do if [ $(d8_queue_list) == "0" ]; then - echo "[INFO] Queue list is clear" + echo "[SUCCESS] Queues are empty" break else 
echo "[INFO] Show queue list" diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml index 419e62478b..4c2f742e10 100644 --- a/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml @@ -4,6 +4,8 @@ kind: Deployment metadata: name: jump-host namespace: {{ .Values.namespace }} + labels: + infra: jump-host spec: replicas: 1 selector: From b44d81291f00b33c9d48f6d99c62f293c7119cdb Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sat, 20 Dec 2025 12:39:26 +0300 Subject: [PATCH 21/71] fix wait ceph Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 0dc6a72f0c..15ad4d2e5f 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -617,10 +617,14 @@ jobs: ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) + + ceph_mgr=$((ceph_mgr)) + ceph_mon=$((ceph_mon)) + ceph_osd=$((ceph_osd)) echo "[INFO] check if ceph pods are ready" - if [[ "${ceph_mgr}" -ge "2" ]] && [[ "${ceph_mon}" -ge "3" ]] && [[ "${ceph_osd}" -ge "3" ]]; then - echo "Ceph cluster is ready" + if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then + echo "[SUCCESS] Ceph cluster is ready" break fi @@ -645,8 +649,10 @@ jobs: kubectl get pods -n d8-operator-ceph kubectl apply -f 05-blockpool.yaml + echo "[INFO] Wait for ceph-rbd-pool-r2 blockpool to be ready, timeout 600s" + kubectl -n d8-operator-ceph wait --for=jsonpath='{.status.phase}'=Ready cephblockpools.ceph.rook.io ceph-rbd-pool-r2 --timeout=600s kubectl apply -f 06-toolbox.yaml - echo "Wait for rook-ceph-tools, timeout 300s" + echo "[INFO] Wait for rook-ceph-tools, timeout 300s" kubectl -n d8-operator-ceph wait --for=condition=Available deployment/rook-ceph-tools --timeout=300s echo "Show ceph pools via rook-ceph-tools" From 87a7c20f414e26dbca155b093249fe665afefb46 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sat, 20 Dec 2025 12:58:42 +0300 Subject: [PATCH 22/71] fix sds wait Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 15ad4d2e5f..dbb0ae356f 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -476,7 +476,8 @@ jobs: done echo "[INFO] Wait BlockDevice are ready" - workers=$(kubectl get nodes -o name | grep worker | wc -l) + workers=$(kubectl get nodes -o name | grep -c worker) + workers=$((workers)) bdexists=false count=60 for i in $(seq 1 $count); do @@ -510,9 +511,13 @@ jobs: linstor_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep linstor-node | grep -c Running || echo 0) csi_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep csi-node | grep -c Running || echo 0) webhooks=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep webhooks | grep -c Running || 
echo 0) + + linstor_node=$((linstor_node)) + csi_node=$((csi_node)) + webhooks=$((webhooks)) echo "[INFO] Check if sds-replicated pods are ready" - if [[ "${linstor_node}" -ge "${workers}" ]] && [[ "${csi_node}" -ge "${workers}" ]] && [[ "${webhooks}" -ge "1" ]]; then + if [[ "${linstor_node}" -ge ${workers} && "${csi_node}" -ge ${workers} && ${webhooks} -ge 1 ]]; then echo "[SUCCESS] sds-replicated-volume is ready" break fi @@ -583,7 +588,7 @@ jobs: ceph_operator_status=$(kubectl get ns d8-operator-ceph -o jsonpath='{.status.phase}' || echo "False") csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") - if [[ "${ceph_operator_status}" = "Active" ]] && [[ "${csi_ceph_status}" = "Ready" ]]; then + if [[ "${ceph_operator_status}" = "Active" && "${csi_ceph_status}" = "Ready" ]]; then echo "[SUCCESS] Namespaces operator-ceph and csi are Active" break fi @@ -645,7 +650,7 @@ jobs: sleep 10 done - echo "Show pods" + echo "[INFO] Show pods" kubectl get pods -n d8-operator-ceph kubectl apply -f 05-blockpool.yaml @@ -655,10 +660,10 @@ jobs: echo "[INFO] Wait for rook-ceph-tools, timeout 300s" kubectl -n d8-operator-ceph wait --for=condition=Available deployment/rook-ceph-tools --timeout=300s - echo "Show ceph pools via rook-ceph-tools" + echo "[INFO] Show ceph pools via rook-ceph-tools" kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph osd pool ls - echo "Configure storage class" + echo "[INFO] Configure storage class" chmod +x ./ceph-configure.sh ./ceph-configure.sh @@ -677,7 +682,7 @@ jobs: - name: Check kubeconfig run: | - echo "Configure kube config" + echo "[INFO] Configure kube config" mkdir -p ~/.kube echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config chmod 600 ~/.kube/config @@ -685,7 +690,7 @@ jobs: - name: Configure Virtualization run: | - echo "Apply Virtualization module config" + echo "[INFO] Apply Virtualization module config" kubectl apply -f -< Date: Sat, 20 Dec 2025 14:01:37 +0300 Subject: [PATCH 23/71] fix Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index dbb0ae356f..537fb0f3b8 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -487,7 +487,9 @@ jobs: break fi echo "[INFO] Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" - d8 s queue list | head -n25 || echo "No queues" + if (( i % 5 == 0 )); then + d8 s queue list | head -n25 || echo "No queues" + fi sleep 10 done @@ -619,9 +621,9 @@ jobs: echo "[INFO] Wait for ceph operator" for i in {1..60}; do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) - ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) - ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) + ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running) + ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running) + ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running) ceph_mgr=$((ceph_mgr)) ceph_mon=$((ceph_mon)) From 1be70cbd9d58b88de2b97e9a84e577539a738ca7 Mon Sep 17 
00:00:00 2001 From: Nikita Korolev Date: Sun, 21 Dec 2025 22:19:21 +0300 Subject: [PATCH 24/71] static: rm cse condition Signed-off-by: Nikita Korolev --- .../charts/cluster-config/templates/modules-minimal.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml index e389d9bf51..902ebafd0e 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml @@ -7,9 +7,6 @@ spec: version: 1 enabled: true settings: -{{- if .Values.cse }} - allowExperimentalModules: true -{{- end }} bundle: {{ .Values.deckhouse.bundle }} --- apiVersion: deckhouse.io/v1alpha1 From 4ce7fbb6795787a68a349fc023609b2bc330118d Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sun, 21 Dec 2025 22:22:40 +0300 Subject: [PATCH 25/71] static: fix gloabl cfg, use selfsingeg cert Signed-off-by: Nikita Korolev --- .../charts/cluster-config/templates/modules-minimal.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml index 902ebafd0e..bd3568dc19 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml @@ -16,6 +16,10 @@ metadata: spec: version: 2 settings: + https: + certManager: + clusterIssuerName: selfsigned + mode: CertManager modules: publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" --- From 07a4043e0d59922668d9e3d9ab88ae54abac460f Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sun, 21 Dec 2025 22:24:49 +0300 Subject: [PATCH 26/71] static: rename tools to scripts Signed-off-by: Nikita Korolev --- test/dvp-static-cluster/{tools => scripts}/deckhouse-queue.sh | 0 test/dvp-static-cluster/{tools => scripts}/gen-kubeconfig.sh | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename test/dvp-static-cluster/{tools => scripts}/deckhouse-queue.sh (100%) rename test/dvp-static-cluster/{tools => scripts}/gen-kubeconfig.sh (100%) diff --git a/test/dvp-static-cluster/tools/deckhouse-queue.sh b/test/dvp-static-cluster/scripts/deckhouse-queue.sh similarity index 100% rename from test/dvp-static-cluster/tools/deckhouse-queue.sh rename to test/dvp-static-cluster/scripts/deckhouse-queue.sh diff --git a/test/dvp-static-cluster/tools/gen-kubeconfig.sh b/test/dvp-static-cluster/scripts/gen-kubeconfig.sh similarity index 100% rename from test/dvp-static-cluster/tools/gen-kubeconfig.sh rename to test/dvp-static-cluster/scripts/gen-kubeconfig.sh From 8d656d64e29c7fffda5d5df4fa43c276199a12ee Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sun, 21 Dec 2025 22:26:08 +0300 Subject: [PATCH 27/71] static: fix condition, rename tools to scripts Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 30 ++++++++++++--------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 537fb0f3b8..ee9133f905 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -309,10 +309,10 @@ jobs: } echo "[INFO] Copy script for generating kubeconfig in nested cluster" - echo "[INFO] Copy tools/gen-kubeconfig.sh 
to master" - d8vscp "./tools/gen-kubeconfig.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/gen-kubeconfig.sh" + echo "[INFO] Copy scripts/gen-kubeconfig.sh to master" + d8vscp "./scripts/gen-kubeconfig.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/gen-kubeconfig.sh" echo "" - d8vscp "./tools/deckhouse-queue.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/deckhouse-queue.sh" + d8vscp "./scripts/deckhouse-queue.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/deckhouse-queue.sh" echo "" echo "[INFO] Set file exec permissions" @@ -476,12 +476,16 @@ jobs: done echo "[INFO] Wait BlockDevice are ready" - workers=$(kubectl get nodes -o name | grep -c worker) + workers=$(kubectl get nodes -o name | grep -c worker || echo 0) workers=$((workers)) + if [[ $workers -eq 0 ]]; then + echo "[ERROR] No worker nodes found" + exit 1 + fi bdexists=false count=60 for i in $(seq 1 $count); do - blockdevices=$(kubectl get blockdevice -o name | wc -l) + blockdevices=$(kubectl get blockdevice -o name | wc -l || echo 0) if [ $blockdevices -ge $workers ]; then bdexists=true break @@ -555,7 +559,8 @@ jobs: local count=90 for i in $(seq 1 $count) ; do - if [[ "$(d8_queue_list)" == "0" ]]; then + queue_count=$(d8_queue_list) + if [ -n "$queue_count" ] && [ "$queue_count" = "0" ]; then echo "[SUCCESS] Queue is clear" break else @@ -621,9 +626,9 @@ jobs: echo "[INFO] Wait for ceph operator" for i in {1..60}; do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running) - ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running) - ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running) + ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) + ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) + ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) ceph_mgr=$((ceph_mgr)) ceph_mon=$((ceph_mon)) @@ -740,7 +745,8 @@ jobs: local count=90 for i in $(seq 1 $count) ; do - if [ $(d8_queue_list) == "0" ]; then + queue_count=$(d8_queue_list) + if [ -n "$queue_count" ] && [ "$queue_count" = "0" ]; then echo "[SUCCESS] Queues are empty" break else @@ -759,7 +765,7 @@ jobs: done } - virttualization_ready() { + virtualization_ready() { local count=90 local virtualization_status @@ -820,7 +826,7 @@ jobs: echo "[INFO] Waiting for Virtualization module to be ready" d8_queue - virttualization_ready + virtualization_ready e2e-test: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 From 3029d9ec07a8de067f06e6e54360f77b5d776b17 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sun, 21 Dec 2025 23:00:11 +0300 Subject: [PATCH 28/71] static: fix gen-kubecfg,edit permissions Signed-off-by: Nikita Korolev --- test/dvp-static-cluster/scripts/deckhouse-queue.sh | 0 test/dvp-static-cluster/scripts/gen-kubeconfig.sh | 9 --------- 2 files changed, 9 deletions(-) mode change 100644 => 100755 test/dvp-static-cluster/scripts/deckhouse-queue.sh mode change 100644 => 100755 test/dvp-static-cluster/scripts/gen-kubeconfig.sh diff --git a/test/dvp-static-cluster/scripts/deckhouse-queue.sh b/test/dvp-static-cluster/scripts/deckhouse-queue.sh old mode 100644 new mode 100755 diff --git a/test/dvp-static-cluster/scripts/gen-kubeconfig.sh b/test/dvp-static-cluster/scripts/gen-kubeconfig.sh old mode 100644 new mode 
100755 index 02e01b5e55..efab6c25ad --- a/test/dvp-static-cluster/scripts/gen-kubeconfig.sh +++ b/test/dvp-static-cluster/scripts/gen-kubeconfig.sh @@ -79,7 +79,6 @@ kubectl() { trap exit_trap SIGINT SIGTERM - SA_NAME=$1 CLUSTER_PREFIX=$2 CLUSTER_NAME=$3 @@ -107,13 +106,6 @@ else exit 1 fi -sleep 2 -log_info "====" -log_info "Kubeconfig will be created successfully if you connected to k8s cluster via ssh tunnel or directly" -log_info "====" -sleep 2 - - log_info "Apply SA, Secrets and ClusterAuthorizationRule" kubectl apply -f -< Date: Sun, 21 Dec 2025 23:13:31 +0300 Subject: [PATCH 29/71] static: fix cfg mc global Signed-off-by: Nikita Korolev --- .../charts/cluster-config/templates/modules-minimal.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml index bd3568dc19..48c19235e5 100644 --- a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml @@ -16,11 +16,11 @@ metadata: spec: version: 2 settings: - https: - certManager: - clusterIssuerName: selfsigned - mode: CertManager modules: + https: + certManager: + clusterIssuerName: selfsigned + mode: CertManager publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" --- apiVersion: deckhouse.io/v1alpha1 From 64a9a7dc3cce1948a7132d8a49765977da9c0cad Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Sun, 21 Dec 2025 23:23:08 +0300 Subject: [PATCH 30/71] static: clean Chart.yaml Signed-off-by: Nikita Korolev --- .../charts/cluster-config/Chart.yaml | 18 ------------------ .../dvp-static-cluster/charts/infra/Chart.yaml | 18 ------------------ 2 files changed, 36 deletions(-) diff --git a/test/dvp-static-cluster/charts/cluster-config/Chart.yaml b/test/dvp-static-cluster/charts/cluster-config/Chart.yaml index c61a43f29a..42ec5bc24c 100644 --- a/test/dvp-static-cluster/charts/cluster-config/Chart.yaml +++ b/test/dvp-static-cluster/charts/cluster-config/Chart.yaml @@ -1,24 +1,6 @@ apiVersion: v2 name: cluster-config description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
appVersion: "1.16.0" diff --git a/test/dvp-static-cluster/charts/infra/Chart.yaml b/test/dvp-static-cluster/charts/infra/Chart.yaml index e0ab20a245..63bd80e963 100644 --- a/test/dvp-static-cluster/charts/infra/Chart.yaml +++ b/test/dvp-static-cluster/charts/infra/Chart.yaml @@ -1,24 +1,6 @@ apiVersion: v2 name: infra description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. appVersion: "1.16.0" From da2a6bd156ea98b3b7409034ad621d4dcef56fc4 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 09:50:47 +0300 Subject: [PATCH 31/71] static: fix sds conditions Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 4 +- .github/workflows/e2e-reusable-pipeline.yml | 326 ++++++++++++-------- 2 files changed, 207 insertions(+), 123 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 95a05c1e5b..eedf668b6e 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -156,12 +156,12 @@ jobs: fi # Add row to table - markdown_table+="| $link_text | $status | $passed | $failed | $pending | $skipped | $date | $time | $branch |\n" + markdown_table+="| $link_text | $status | $passed | $failed | $pending | $skipped | $date | $time | $branch |\n" } # Initialize markdown table markdown_table="" - header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" + header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" separator="|---|---|---|---|---|---|---|---|---|\n" markdown_table+="$header" markdown_table+="$separator" diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index ee9133f905..dce059a0d5 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -452,91 +452,137 @@ jobs: if: ${{ inputs.storage_type == 'replicated' }} working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/sds-replicated run: | - kubectl apply -f mc.yaml - echo "[INFO] Wait for sds-node-configurator" - kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-node-configurator --timeout=300s + sds_replicated_ready() { + local count=60 + for i in $(1 seq $count); do + + sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${sds_replicated_volume_status}" = "Active" ]]; then + echo "[SUCCESS] Namespaces sds-replicated-volume are Active" + kubectl -n 
d8-sds-replicated-volume get pods + break + fi + + echo "[INFO] Waiting 10s for sds-replicated-volume to be ready (attempt ${i}/${count})" + if (( i % 5 == 0 )); then + echo "[INFO] Show namespaces sds-replicated-volume" + kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" + echo "[DEBUG] Show queue (first 25 lines)" + d8 s queue list | head -n25 || echo "No queues" + fi + sleep 10 + done + echo "[ERROR] Namespaces sds-replicated-volume are not ready after ${count} attempts" + echo "[DEBUG] Show namespaces sds" + kubectl get ns | grep sds || echo "Namespaces sds-replicated-volume are not ready" + echo "[DEBUG] Show queue" + echo "::group::📦 Show queue" + d8 s queue list || echo "No queues" + echo "::endgroup::" + echo "[DEBUG] Show deckhouse logs" + echo "::group::📝 deckhouse logs" + d8 s logs | tail -n 100 + echo "::endgroup::" + exit 1 + } + + sds_pods_ready() { + local count=60 + for i in $(1 seq $count); do + echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" + linstor_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep -c "linstor-node.*Running") + csi_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep -c "csi-node.*Running") + webhooks=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep -c "webhooks.*Running") + + linstor_node=$((linstor_node)) + csi_node=$((csi_node)) + webhooks=$((webhooks)) + + echo "[INFO] Check if sds-replicated pods are ready" + if [[ ${linstor_node} -ge ${workers} && ${csi_node} -ge ${workers} && ${webhooks} -ge 1 ]]; then + echo "[SUCCESS] sds-replicated-volume is ready" + break + fi + + echo "[INFO] Waiting 10s for sds-replicated-volume to be ready (attempt ${i}/${count})" + if (( i % 5 == 0 )); then + echo "[DEBUG] Get pods" + kubectl -n d8-sds-replicated-volume get pods || true + echo "[DEBUG] Show queue (first 25 lines)" + d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" + echo " " + fi + done + echo "[ERROR] sds-replicated-volume is not ready after ${count} attempts" + echo "[DEBUG] Get pods" + echo "::group::📦 sds-replicated-volume pods" + kubectl -n d8-sds-replicated-volume get pods || true + echo "::endgroup::" + echo "[DEBUG] Show queue" + echo "::group::📦 Show queue" + d8 s queue list || echo "Failed to retrieve list queue" + echo "::endgroup::" + echo "[DEBUG] Show deckhouse logs" + echo "::group::📝 deckhouse logs" + d8 s logs | tail -n 100 + echo "::endgroup::" + exit 1 + } - for i in {1..60}; do - sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") + blockdevices_ready() { + local count=60 + workers=$(kubectl get nodes -o name | grep -c worker) + workers=$((workers)) - if [[ "${sds_replicated_volume_status}" = "Active" ]]; then - echo "[SUCCESS] Namespaces sds-replicated-volume are Active" - kubectl -n d8-sds-replicated-volume get pods - break + if [[ $workers -eq 0 ]]; then + echo "[ERROR] No worker nodes found" + exit 1 fi - echo "[INFO] Waiting 10s for sds-replicated-volume to be ready" - echo "[INFO] Show namespaces sds-replicated-volume" - kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" + bdexists=false - if (( i % 5 == 0 )); then - d8 s queue list | head -n25 || echo "No queues" - fi - sleep 10 - done - - echo "[INFO] Wait BlockDevice are ready" - workers=$(kubectl get nodes -o name | grep -c worker || echo 0) - workers=$((workers)) - if [[ $workers -eq 0 ]]; then - echo "[ERROR] No 
worker nodes found" - exit 1 - fi - bdexists=false - count=60 - for i in $(seq 1 $count); do - blockdevices=$(kubectl get blockdevice -o name | wc -l || echo 0) - if [ $blockdevices -ge $workers ]; then - bdexists=true - break - fi - echo "[INFO] Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" - if (( i % 5 == 0 )); then - d8 s queue list | head -n25 || echo "No queues" - fi - sleep 10 - done + for i in $(seq 1 $count); do + blockdevices=$(kubectl get blockdevice -o name | wc -l || echo 0) + if [ $blockdevices -ge $workers ]; then + bdexists=true + break + fi - if [ $bdexists = false ]; then + echo "[INFO] Wait 10 sec until blockdevices is greater or equal to $workers (attempt ${i}/${count})" + if (( i % 5 == 0 )); then + echo "[DEBUG] Show queue (first 25 lines)" + d8 s queue list | head -n25 || echo "No queues" + fi + + sleep 10 + done echo "[ERROR] Blockdevices is not 3" - echo "[DEBUG] Show blockdevice" + echo "[DEBUG] Show blockdevices" kubectl get blockdevice echo "[DEBUG] Show sds namespaces" kubectl get ns | grep sds || echo "ns sds is not found" echo "[DEBUG] Show cluster nodes" kubectl get nodes echo "[DEBUG] Show deckhouse logs" + echo "::group::📝 deckhouse logs" d8 s logs | tail -n 100 - echo " " + echo "::endgroup::" exit 1 - fi + } - echo "[INFO] Wait pods and webhooks sds-replicated pods" - for i in {1..60}; do - echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" - linstor_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep linstor-node | grep -c Running || echo 0) - csi_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep csi-node | grep -c Running || echo 0) - webhooks=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep webhooks | grep -c Running || echo 0) - - linstor_node=$((linstor_node)) - csi_node=$((csi_node)) - webhooks=$((webhooks)) - - echo "[INFO] Check if sds-replicated pods are ready" - if [[ "${linstor_node}" -ge ${workers} && "${csi_node}" -ge ${workers} && ${webhooks} -ge 1 ]]; then - echo "[SUCCESS] sds-replicated-volume is ready" - break - fi + kubectl apply -f mc.yaml + echo "[INFO] Wait for sds-node-configurator" + kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-node-configurator --timeout=300s - echo "[INFO] Waiting 10s for sds-replicated-volume to be ready" - if (( i % 5 == 0 )); then - echo "[DEBUG] Get pods" - kubectl -n d8-sds-replicated-volume get pods || true - echo "[DEBUG] Show queue (first 25 lines)" - d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" - echo " " - fi - done + echo "[INFO] Wait for sds-replicated-volume to be ready" + sds_replicated_ready + + echo "[INFO] Wait BlockDevice are ready" + blockdevices_ready + + echo "[INFO] Wait pods and webhooks sds-replicated pods" + sds_pods_ready chmod +x lvg-gen.sh ./lvg-gen.sh @@ -575,6 +621,95 @@ jobs: done } + debug_output() { + echo "[DEBUG] Show ceph namespace" + echo "::group::📦 ceph namespace" + kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" + echo "::endgroup::" + echo "[DEBUG] Show ModuleConfig ceph" + echo "::group::📦 ModuleConfig ceph" + kubectl get mc | grep ceph || echo "Failed to retrieve mc" + echo "::endgroup::" + echo "[DEBUG] Show ceph in resource modules" + echo "::group::📦 ceph in resource modules" + kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" + echo "::endgroup::" + echo "[DEBUG] Show queue" + echo "::group::📦 queue" + d8 s queue list || echo "Failed to retrieve list queue" + echo 
"::endgroup::" + echo "[DEBUG] Show deckhouse logs" + echo "::group::📝 deckhouse logs" + d8 s logs | tail -n 100 + echo "::endgroup::" + } + + ceph_operator_ready() { + local count=60 + for i in $(1 seq $count); do + ceph_operator_status=$(kubectl get ns d8-operator-ceph -o jsonpath='{.status.phase}' || echo "False") + csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${ceph_operator_status}" = "Active" && "${csi_ceph_status}" = "Ready" ]]; then + echo "[SUCCESS] Namespaces operator-ceph and csi are Active" + break + fi + + echo "[INFO] Waiting 10s for ceph operator and csi namespaces to be ready (attempt ${i}/${count})" + + if (( i % 5 == 0 )); then + echo "[DEBUG] Get namespace" + kubectl get namespace | grep ceph || echo "[WARNING] Namespaces operator-ceph and csi are not ready" + echo "[DEBUG] Show all namespaces" + kubectl get namespace + echo "[DEBUG] Show queue (first 25 lines)" + d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" + echo " " + fi + sleep 10 + done + debug_output + exit 1 + } + + ceph_ready() { + local count=60 + for i in $(1 seq $count); do + echo "[INFO] Check ceph pods, mon mgr osd" + ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep -c "ceph-mgr.*Running") + ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep -c "ceph-mon.*Running") + ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep -c "ceph-osd.*Running") + + ceph_mgr=$((ceph_mgr)) + ceph_mon=$((ceph_mon)) + ceph_osd=$((ceph_osd)) + + echo "[INFO] check if ceph pods are ready" + if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then + echo "[SUCCESS] Ceph cluster is ready" + break + fi + + echo "[WARNING] Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" + echo "[INFO] Waiting 10s for ceph operator to be ready" + kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" + if (( i % 5 == 0 )); then + echo "[DEBUG] Show ceph namespace" + kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" + echo "[DEBUG] Show ModuleConfig ceph" + kubectl get mc | grep ceph || echo "Failed to retrieve mc" + echo "[DEBUG] Show ceph in resource modules" + kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" + echo "[DEBUG] Show queue" + d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" + fi + echo "[INFO] Wait until all necessary pods are ready ${i}/${$count}" + sleep 10 + done + debug_output + exit 1 + } + export registry=${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} yq e '.spec.registry.dockerCfg = env(registry)' -i 00-ms.yaml unset registry @@ -591,27 +726,7 @@ jobs: d8_queue echo "Start wait for ceph operator and csi" - for i in {1..60}; do - ceph_operator_status=$(kubectl get ns d8-operator-ceph -o jsonpath='{.status.phase}' || echo "False") - csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") - - if [[ "${ceph_operator_status}" = "Active" && "${csi_ceph_status}" = "Ready" ]]; then - echo "[SUCCESS] Namespaces operator-ceph and csi are Active" - break - fi - - echo "[INFO] Waiting 10s for ceph operator and csi namespaces to be ready" - echo "[INFO] Get namespace" - kubectl get namespace | grep ceph || echo "[WARNING] Namespaces operator-ceph and csi are not ready" - - if (( i % 5 == 0 )); then - echo "[DEBUG] Show all namespaces" - kubectl get namespace - echo " " - d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" - fi - 
sleep 10 - done + ceph_operator_ready echo "[INFO] Create ServiceAccounts" kubectl apply -f 02-sa.yaml @@ -624,38 +739,7 @@ jobs: kubectl -n d8-operator-ceph get po echo "[INFO] Wait for ceph operator" - for i in {1..60}; do - echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) - ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) - ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) - - ceph_mgr=$((ceph_mgr)) - ceph_mon=$((ceph_mon)) - ceph_osd=$((ceph_osd)) - - echo "[INFO] check if ceph pods are ready" - if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then - echo "[SUCCESS] Ceph cluster is ready" - break - fi - - echo "[WARNING] Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" - echo "[INFO] Waiting 10s for ceph operator to be ready" - kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" - if (( i % 5 == 0 )); then - echo "[DEBUG] Show ceph namespace" - kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" - echo "[DEBUG] Show ModuleConfig ceph" - kubectl get mc | grep ceph || echo "Failed to retrieve mc" - echo "[DEBUG] Show ceph in resource modules" - kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" - echo "[DEBUG] Show queue" - d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" - fi - echo "[INFO] Wait until all necessary pods are ready ${i}/60" - sleep 10 - done + ceph_ready echo "[INFO] Show pods" kubectl get pods -n d8-operator-ceph From cbafd5275aa7e2ba9758e3a4846a8450c8e29b59 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 12:08:50 +0300 Subject: [PATCH 32/71] static: rm jump host after cluster deployed Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 20 ++++++++++---------- test/dvp-static-cluster/Taskfile.yaml | 11 ++++++----- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index dce059a0d5..b62bbfaf54 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -491,9 +491,9 @@ jobs: local count=60 for i in $(1 seq $count); do echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" - linstor_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep -c "linstor-node.*Running") - csi_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep -c "csi-node.*Running") - webhooks=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep -c "webhooks.*Running") + linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running") + csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running") + webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running") linstor_node=$((linstor_node)) csi_node=$((csi_node)) @@ -554,7 +554,7 @@ jobs: echo "[DEBUG] Show queue (first 25 lines)" d8 s queue list | head -n25 || echo "No queues" fi - + sleep 10 done echo "[ERROR] Blockdevices is not 3" @@ -676,9 +676,9 @@ jobs: local count=60 for i in $(1 seq $count); do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep -c "ceph-mgr.*Running") - ceph_mon=$(kubectl -n 
d8-operator-ceph get po 2>/dev/null | grep -c "ceph-mon.*Running") - ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep -c "ceph-osd.*Running") + ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mgr.*Running") + ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mon.*Running") + ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-osd.*Running") ceph_mgr=$((ceph_mgr)) ceph_mon=$((ceph_mon)) @@ -692,7 +692,7 @@ jobs: echo "[WARNING] Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" echo "[INFO] Waiting 10s for ceph operator to be ready" - kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" + kubectl -n d8-operator-ceph get pods || echo "Failed to retrieve pods" if (( i % 5 == 0 )); then echo "[DEBUG] Show ceph namespace" kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" @@ -736,7 +736,7 @@ jobs: kubectl apply -f 04-cluster.yaml echo "[INFO] Get pod in d8-operator-ceph" - kubectl -n d8-operator-ceph get po + kubectl -n d8-operator-ceph get pods echo "[INFO] Wait for ceph operator" ceph_ready @@ -887,7 +887,7 @@ jobs: kubectl -n d8-virtualization get pods || true echo "[DEBUG] Show dvcr pod yaml" echo "::group::📦 dvcr pod yaml" - kubectl -n d8-virtualization get pod -l app=dvcr -o yaml || true + kubectl -n d8-virtualization get pods -l app=dvcr -o yaml || true echo "::endgroup::" echo "[DEBUG] Show dvcr pod describe" echo "::group::📦 dvcr pod describe" diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml index 4b9a4d9204..226acc974a 100644 --- a/test/dvp-static-cluster/Taskfile.yaml +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -134,12 +134,12 @@ tasks: cmds: - yq 'select( (.apiVersion + "/" + .kind) != "deckhouse.io/v1/InitConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/ClusterConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/StaticClusterConfiguration" )' {{ .TMP_DIR }}/config.yaml > {{ .TMP_DIR }}/config-manifests.yaml - render-all: - desc: Generate all manifests + infra-undeploy-jumphost: + desc: Undeploy Jumphost cmds: - - task render-infra - - task render-cluster-config - - task render-cluster-manifests + - | + kubectl -n {{ .NAMESPACE }} get all -l app=jump-host || true + kubectl -n {{ .NAMESPACE }} delete all -l app=jump-host || true dhctl-bootstrap: desc: Bootstrap DKP over DVP @@ -177,6 +177,7 @@ tasks: export end_time=$(date +%s) difference=$((end_time - {{.start_time}})) date -ud "@$difference" +'%H:%M:%S' + - task: infra-undeploy-jumphost show-connection-info: desc: Show connection info From 0fac3a3f45962a0b0c16ed0a833936e96d110b74 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 12:36:20 +0300 Subject: [PATCH 33/71] install d8 cli via curl Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 31 +++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index b62bbfaf54..3fc0ef319b 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -135,8 +135,13 @@ jobs: version: 3.x repo-token: ${{ secrets.GITHUB_TOKEN }} + # - name: Setup d8 + # uses: ./.github/actions/install-d8 - name: Setup d8 - uses: ./.github/actions/install-d8 + run: | + sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" + 
echo "/opt/deckhouse/bin" >> $GITHUB_PATH + - name: Log in to private registry uses: docker/login-action@v3 @@ -417,8 +422,12 @@ jobs: version: 3.x repo-token: ${{ secrets.GITHUB_TOKEN }} + # - name: Setup d8 + # uses: ./.github/actions/install-d8 - name: Setup d8 - uses: ./.github/actions/install-d8 + run: | + sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" + echo "/opt/deckhouse/bin" >> $GITHUB_PATH - name: Install kubectl CLI uses: azure/setup-kubectl@v4 @@ -768,8 +777,12 @@ jobs: - uses: actions/checkout@v4 - name: Install kubectl CLI uses: azure/setup-kubectl@v4 + # - name: Setup d8 + # uses: ./.github/actions/install-d8 - name: Setup d8 - uses: ./.github/actions/install-d8 + run: | + sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" + echo "/opt/deckhouse/bin" >> $GITHUB_PATH - name: Check kubeconfig run: | @@ -940,8 +953,12 @@ jobs: echo "Install ginkgo" go install tool + # - name: Setup d8 + # uses: ./.github/actions/install-d8 - name: Setup d8 - uses: ./.github/actions/install-d8 + run: | + sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" + echo "/opt/deckhouse/bin" >> $GITHUB_PATH - name: Install kubectl CLI uses: azure/setup-kubectl@v4 @@ -1010,8 +1027,12 @@ jobs: sudo apt-get update sudo apt-get install -y apache2-utils + # - name: Setup d8 + # uses: ./.github/actions/install-d8 - name: Setup d8 - uses: ./.github/actions/install-d8 + run: | + sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" + echo "/opt/deckhouse/bin" >> $GITHUB_PATH - name: Install Task uses: arduino/setup-task@v2 From 7b898793b086d16b939cd4d0452e3b87c940300f Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 14:31:24 +0300 Subject: [PATCH 34/71] fix loop seq Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 3fc0ef319b..0cfefea053 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -463,7 +463,7 @@ jobs: run: | sds_replicated_ready() { local count=60 - for i in $(1 seq $count); do + for i in $(seq 1 $count); do sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") @@ -482,6 +482,7 @@ jobs: fi sleep 10 done + echo "[ERROR] Namespaces sds-replicated-volume are not ready after ${count} attempts" echo "[DEBUG] Show namespaces sds" kubectl get ns | grep sds || echo "Namespaces sds-replicated-volume are not ready" @@ -498,7 +499,7 @@ jobs: sds_pods_ready() { local count=60 - for i in $(1 seq $count); do + for i in $(seq 1 $count); do echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running") csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running") @@ -655,7 +656,7 @@ jobs: ceph_operator_ready() { local count=60 - for i in $(1 seq $count); do + for i in $(seq 1 $count); do ceph_operator_status=$(kubectl get ns d8-operator-ceph -o jsonpath='{.status.phase}' || echo "False") csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") @@ -683,7 +684,7 @@ jobs: ceph_ready() { local 
count=60 - for i in $(1 seq $count); do + for i in $(seq 1 $count); do echo "[INFO] Check ceph pods, mon mgr osd" ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mgr.*Running") ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mon.*Running") From 61a6401e6586cef2e1f1baae2ddf2c57baa99675 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 15:04:58 +0300 Subject: [PATCH 35/71] fix condition sds and queue script Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 54 ++++++++++++------- .../scripts/deckhouse-queue.sh | 8 +-- 2 files changed, 41 insertions(+), 21 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 0cfefea053..7566724efa 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -469,8 +469,8 @@ jobs: if [[ "${sds_replicated_volume_status}" = "Active" ]]; then echo "[SUCCESS] Namespaces sds-replicated-volume are Active" - kubectl -n d8-sds-replicated-volume get pods - break + kubectl get ns d8-sds-replicated-volume + return 0 fi echo "[INFO] Waiting 10s for sds-replicated-volume to be ready (attempt ${i}/${count})" @@ -499,11 +499,17 @@ jobs: sds_pods_ready() { local count=60 + local linstor_node + local csi_node + local webhooks + local workers=$(kubectl get nodes -o name | grep -c worker) + workers=$((workers)) + for i in $(seq 1 $count); do echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" - linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running") - csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running") - webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running") + linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running" 2>/dev/null) + csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running" 2>/dev/null) + webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running" 2>/dev/null) linstor_node=$((linstor_node)) csi_node=$((csi_node)) @@ -512,7 +518,7 @@ jobs: echo "[INFO] Check if sds-replicated pods are ready" if [[ ${linstor_node} -ge ${workers} && ${csi_node} -ge ${workers} && ${webhooks} -ge 1 ]]; then echo "[SUCCESS] sds-replicated-volume is ready" - break + return 0 fi echo "[INFO] Waiting 10s for sds-replicated-volume to be ready (attempt ${i}/${count})" @@ -524,6 +530,7 @@ jobs: echo " " fi done + echo "[ERROR] sds-replicated-volume is not ready after ${count} attempts" echo "[DEBUG] Get pods" echo "::group::📦 sds-replicated-volume pods" @@ -556,7 +563,7 @@ jobs: blockdevices=$(kubectl get blockdevice -o name | wc -l || echo 0) if [ $blockdevices -ge $workers ]; then bdexists=true - break + return 0 fi echo "[INFO] Wait 10 sec until blockdevices is greater or equal to $workers (attempt ${i}/${count})" @@ -567,6 +574,7 @@ jobs: sleep 10 done + echo "[ERROR] Blockdevices is not 3" echo "[DEBUG] Show blockdevices" kubectl get blockdevice @@ -613,20 +621,21 @@ jobs: d8_queue() { local count=90 + local queue_count for i in $(seq 1 $count) ; do queue_count=$(d8_queue_list) if [ -n "$queue_count" ] && [ "$queue_count" = "0" ]; then echo "[SUCCESS] Queue is clear" - break + return 0 else echo "[INFO] Show queue list" d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve 
list queue" fi echo "[INFO] Wait until queues are empty ${i}/${count}" - kubectl get ns | grep sds || echo "Namespaces sds not found" - echo " " + kubectl get ns | grep ceph || echo "Namespaces ceph not found" + echo " " sleep 10 done } @@ -656,13 +665,16 @@ jobs: ceph_operator_ready() { local count=60 + local ceph_operator_status + local csi_ceph_status + for i in $(seq 1 $count); do ceph_operator_status=$(kubectl get ns d8-operator-ceph -o jsonpath='{.status.phase}' || echo "False") csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") if [[ "${ceph_operator_status}" = "Active" && "${csi_ceph_status}" = "Ready" ]]; then echo "[SUCCESS] Namespaces operator-ceph and csi are Active" - break + return 0 fi echo "[INFO] Waiting 10s for ceph operator and csi namespaces to be ready (attempt ${i}/${count})" @@ -678,17 +690,22 @@ jobs: fi sleep 10 done + debug_output exit 1 } ceph_ready() { local count=60 + local ceph_mgr + local ceph_mon + local ceph_osd + for i in $(seq 1 $count); do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mgr.*Running") - ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mon.*Running") - ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-osd.*Running") + ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mgr.*Running" 2>/dev/null) + ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mon.*Running" 2>/dev/null) + ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-osd.*Running" 2>/dev/null) ceph_mgr=$((ceph_mgr)) ceph_mon=$((ceph_mon)) @@ -697,7 +714,7 @@ jobs: echo "[INFO] check if ceph pods are ready" if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then echo "[SUCCESS] Ceph cluster is ready" - break + return 0 fi echo "[WARNING] Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" @@ -713,9 +730,10 @@ jobs: echo "[DEBUG] Show queue" d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" fi - echo "[INFO] Wait until all necessary pods are ready ${i}/${$count}" + echo "[INFO] Wait until all necessary pods are ready ${i}/${count}" sleep 10 done + debug_output exit 1 } @@ -846,7 +864,7 @@ jobs: queue_count=$(d8_queue_list) if [ -n "$queue_count" ] && [ "$queue_count" = "0" ]; then echo "[SUCCESS] Queues are empty" - break + return 0 else echo "[INFO] Show queue list" d8 s queue list | head -n25 || echo "Failed to retrieve list queue" @@ -873,7 +891,7 @@ jobs: echo "[SUCCESS] Virtualization module is ready" kubectl get modules virtualization kubectl -n d8-virtualization get pods - break + return 0 fi echo "[INFO] Waiting 10s for Virtualization module to be ready (attempt $i/$count)" diff --git a/test/dvp-static-cluster/scripts/deckhouse-queue.sh b/test/dvp-static-cluster/scripts/deckhouse-queue.sh index 6492aeac0e..a524b2f824 100755 --- a/test/dvp-static-cluster/scripts/deckhouse-queue.sh +++ b/test/dvp-static-cluster/scripts/deckhouse-queue.sh @@ -78,11 +78,13 @@ d8_queue() { if [ $(d8_queue_list) == "0" ]; then log_success "Queue is clear" break - else - log_info "Show queue first 25 lines" - d8 p queue list | head -n25 || echo "Failed to retrieve queue" fi log_info "Wait until queues are empty ${i}/${count}" + if (( i % 5 == 0 )); then + log_info "Show queue first 25 lines" + d8 p queue list | head -n25 || echo "Failed to retrieve queue" + echo " " + fi sleep 10 done } From 
afcdd3afd92e6cfefaa640963dbfd33d217c38e1 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 16:14:14 +0300 Subject: [PATCH 36/71] use deckhouse main, pr17193 merged Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 4 ++-- .github/workflows/e2e-reusable-pipeline.yml | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index eedf668b6e..1ece5b91f4 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -39,7 +39,7 @@ jobs: nested_storageclass_name: nested-ceph-pool-r2-csi-rbd branch: main virtualization_tag: main - deckhouse_tag: pr17193 + deckhouse_tag: main pod_subnet_cidr: 10.88.0.0/16 service_subnet_cidr: 10.92.0.0/16 default_user: cloud @@ -59,7 +59,7 @@ jobs: nested_storageclass_name: nested-thin-r1 branch: main virtualization_tag: main - deckhouse_tag: pr17193 + deckhouse_tag: main pod_subnet_cidr: 10.89.0.0/16 service_subnet_cidr: 10.93.0.0/16 default_user: cloud diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 7566724efa..23fd1818b0 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -511,9 +511,9 @@ jobs: csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running" 2>/dev/null) webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running" 2>/dev/null) - linstor_node=$((linstor_node)) - csi_node=$((csi_node)) - webhooks=$((webhooks)) + # linstor_node=$((linstor_node)) + # csi_node=$((csi_node)) + # webhooks=$((webhooks)) echo "[INFO] Check if sds-replicated pods are ready" if [[ ${linstor_node} -ge ${workers} && ${csi_node} -ge ${workers} && ${webhooks} -ge 1 ]]; then @@ -707,9 +707,9 @@ jobs: ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mon.*Running" 2>/dev/null) ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-osd.*Running" 2>/dev/null) - ceph_mgr=$((ceph_mgr)) - ceph_mon=$((ceph_mon)) - ceph_osd=$((ceph_osd)) + # ceph_mgr=$((ceph_mgr)) + # ceph_mon=$((ceph_mon)) + # ceph_osd=$((ceph_osd)) echo "[INFO] check if ceph pods are ready" if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then From d1e7468db2a64269e351abe8dd15267b1f718358 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 18:30:33 +0300 Subject: [PATCH 37/71] static: fix install d8, fix setup Signed-off-by: Nikita Korolev --- .github/actions/install-d8/action.yml | 13 ++++- .github/workflows/e2e-reusable-pipeline.yml | 53 ++++++++++----------- test/dvp-static-cluster/Taskfile.yaml | 4 +- 3 files changed, 38 insertions(+), 32 deletions(-) diff --git a/.github/actions/install-d8/action.yml b/.github/actions/install-d8/action.yml index 4c3f32f5bb..076827c837 100644 --- a/.github/actions/install-d8/action.yml +++ b/.github/actions/install-d8/action.yml @@ -3,8 +3,17 @@ description: Install deckhouse-cli runs: using: "composite" steps: - - name: Install deckhouse-cli + # - name: Install deckhouse-cli + # uses: werf/trdl/actions/setup-app@v0.12.2 + # with: + # repo: d8 + # url: https://deckhouse.ru/downloads/deckhouse-cli-trdl/ + # root-version: 1 + # root-sha512: 343bd5f0d8811254e5f0b6fe292372a7b7eda08d276ff255229200f84e58a8151ab2729df3515cb11372dc3899c70df172a4e54c8a596a73d67ae790466a0491 + # group: 0 + # channel: stable + - name: Install deckhouse-cli curl shell: bash run: | - sh -c 
"$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" + sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" "" --version v0.25.1 echo "/opt/deckhouse/bin" >> $GITHUB_PATH diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 23fd1818b0..e3c19d7050 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -135,13 +135,10 @@ jobs: version: 3.x repo-token: ${{ secrets.GITHUB_TOKEN }} - # - name: Setup d8 - # uses: ./.github/actions/install-d8 - name: Setup d8 - run: | - sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" - echo "/opt/deckhouse/bin" >> $GITHUB_PATH - + uses: ./.github/actions/install-d8 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Log in to private registry uses: docker/login-action@v3 @@ -422,12 +419,11 @@ jobs: version: 3.x repo-token: ${{ secrets.GITHUB_TOKEN }} - # - name: Setup d8 - # uses: ./.github/actions/install-d8 - name: Setup d8 - run: | - sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" - echo "/opt/deckhouse/bin" >> $GITHUB_PATH + uses: ./.github/actions/install-d8 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Install kubectl CLI uses: azure/setup-kubectl@v4 @@ -576,12 +572,16 @@ jobs: done echo "[ERROR] Blockdevices is not 3" + echo "[DEBUG] Show cluster nodes" + kubectl get nodes echo "[DEBUG] Show blockdevices" kubectl get blockdevice echo "[DEBUG] Show sds namespaces" kubectl get ns | grep sds || echo "ns sds is not found" - echo "[DEBUG] Show cluster nodes" - kubectl get nodes + echo "[DEBUG] Show pods in sds-replicated-volume" + echo "::group::📦 pods in sds-replicated-volume" + kubectl -n d8-sds-replicated-volume get pods || true + echo "::endgroup::" echo "[DEBUG] Show deckhouse logs" echo "::group::📝 deckhouse logs" d8 s logs | tail -n 100 @@ -796,12 +796,11 @@ jobs: - uses: actions/checkout@v4 - name: Install kubectl CLI uses: azure/setup-kubectl@v4 - # - name: Setup d8 - # uses: ./.github/actions/install-d8 - name: Setup d8 - run: | - sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" - echo "/opt/deckhouse/bin" >> $GITHUB_PATH + uses: ./.github/actions/install-d8 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Check kubeconfig run: | @@ -972,12 +971,11 @@ jobs: echo "Install ginkgo" go install tool - # - name: Setup d8 - # uses: ./.github/actions/install-d8 - name: Setup d8 - run: | - sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" - echo "/opt/deckhouse/bin" >> $GITHUB_PATH + uses: ./.github/actions/install-d8 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Install kubectl CLI uses: azure/setup-kubectl@v4 @@ -1046,12 +1044,11 @@ jobs: sudo apt-get update sudo apt-get install -y apache2-utils - # - name: Setup d8 - # uses: ./.github/actions/install-d8 - name: Setup d8 - run: | - sh -c "$(curl -fsSL https://raw.githubusercontent.com/deckhouse/deckhouse-cli/main/tools/install.sh)" - echo "/opt/deckhouse/bin" >> $GITHUB_PATH + uses: ./.github/actions/install-d8 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Install Task uses: arduino/setup-task@v2 diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml index 226acc974a..a6af288973 100644 --- 
a/test/dvp-static-cluster/Taskfile.yaml +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -138,8 +138,8 @@ tasks: desc: Undeploy Jumphost cmds: - | - kubectl -n {{ .NAMESPACE }} get all -l app=jump-host || true - kubectl -n {{ .NAMESPACE }} delete all -l app=jump-host || true + kubectl -n {{ .NAMESPACE }} get all -l infra=jump-host || true + kubectl -n {{ .NAMESPACE }} delete all -l infra=jump-host || true dhctl-bootstrap: desc: Bootstrap DKP over DVP From 4645294be1b148ed9ee7fcf8be586b0a1f99c8bf Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 19:11:34 +0300 Subject: [PATCH 38/71] static: add proccesing bootstrap failed Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 72 +++++++++++++++++---- .github/workflows/e2e-reusable-pipeline.yml | 7 ++ 2 files changed, 66 insertions(+), 13 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 1ece5b91f4..d0c8cc3ba6 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -97,6 +97,29 @@ jobs: esac } + # Generate bootstrap failure summary + generate_bootstrap_failure_summary() { + local storage_type=$1 + local workflow_run_id=$2 + local csi=$(get_csi_name "$storage_type") + local date=$(date +"%Y-%m-%d") + local time=$(date +"%H:%M:%S") + local branch="${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" + local link="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${workflow_run_id}" + + # Create JSON summary for bootstrap failure + local summary_json=$(jq -n \ + --arg csi "$csi" \ + --arg date "$date" \ + --arg time "$time" \ + --arg branch "$branch" \ + --arg status ":x: BOOTSTRAP CLUSTER FAILED" \ + --arg link "$link" \ + '{CSI: $csi, Date: $date, StartTime: $time, Branch: $branch, Status: $status, Passed: 0, Failed: 0, Pending: 0, Skipped: 0, Link: $link}') + + echo "$summary_json" + } + # Parse summary JSON and add to table parse_summary() { local summary_json=$1 @@ -170,21 +193,44 @@ jobs: DATE=$(date +"%Y-%m-%d") COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n\n" - # Save to json files - cat > /tmp/ceph.json << 'EOF' - ${{ needs.e2e-ceph.outputs.e2e-summary }} - EOF - - cat > /tmp/replicated.json << 'EOF' - ${{ needs.e2e-replicated.outputs.e2e-summary }} - EOF - - if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then - parse_summary "$(cat /tmp/ceph.json)" "ceph" + # Check bootstrap status and generate summaries + ceph_bootstrap_failed="${{ needs.e2e-ceph.outputs.bootstrap-failed }}" + replicated_bootstrap_failed="${{ needs.e2e-replicated.outputs.bootstrap-failed }}" + ceph_run_id="${{ needs.e2e-ceph.outputs.workflow-run-id }}" + replicated_run_id="${{ needs.e2e-replicated.outputs.workflow-run-id }}" + + # Handle ceph storage type + if [ "$ceph_bootstrap_failed" == "true" ]; then + echo "[INFO] Bootstrap failed for ceph, generating failure summary" + ceph_summary=$(generate_bootstrap_failure_summary "ceph" "$ceph_run_id") + echo "$ceph_summary" > /tmp/ceph.json + parse_summary "$ceph_summary" "ceph" + else + # Save to json files + cat > /tmp/ceph.json << 'EOF' + ${{ needs.e2e-ceph.outputs.e2e-summary }} + EOF + + if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then + parse_summary "$(cat /tmp/ceph.json)" "ceph" + fi fi - if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then - parse_summary "$(cat /tmp/replicated.json)" "replicated" 
+ # Handle replicated storage type + if [ "$replicated_bootstrap_failed" == "true" ]; then + echo "[INFO] Bootstrap failed for replicated, generating failure summary" + replicated_summary=$(generate_bootstrap_failure_summary "replicated" "$replicated_run_id") + echo "$replicated_summary" > /tmp/replicated.json + parse_summary "$replicated_summary" "replicated" + else + # Save to json files + cat > /tmp/replicated.json << 'EOF' + ${{ needs.e2e-replicated.outputs.e2e-summary }} + EOF + + if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then + parse_summary "$(cat /tmp/replicated.json)" "replicated" + fi fi COMBINED_SUMMARY+="${markdown_table}\n" diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index e3c19d7050..e0997bc7c7 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -78,6 +78,12 @@ on: e2e-summary: description: "E2E test results" value: ${{ jobs.e2e-test.outputs.report-summary }} + bootstrap-failed: + description: "Bootstrap cluster failed" + value: ${{ jobs.bootstrap.outputs.bootstrap-failed }} + workflow-run-id: + description: "Workflow run ID" + value: ${{ github.run_id }} env: BRANCH: ${{ inputs.branch }} @@ -102,6 +108,7 @@ jobs: kubeconfig-content: ${{ steps.generate-kubeconfig.outputs.config }} storage-type: ${{ steps.vars.outputs.storage_type }} nested-storageclass-name: ${{ steps.vars.outputs.nested_storageclass_name }} + bootstrap-failed: ${{ steps.dhctl-bootstrap.outcome == 'failure' }} steps: - uses: actions/checkout@v4 # with: From 072742ad853fe0f13069163bb3f96f36d0de0a1d Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 19:14:17 +0300 Subject: [PATCH 39/71] static: fix task show-connection-info Signed-off-by: Nikita Korolev --- test/dvp-static-cluster/Taskfile.yaml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml index a6af288973..c19addb66f 100644 --- a/test/dvp-static-cluster/Taskfile.yaml +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -186,10 +186,6 @@ tasks: sh: yq eval '.discovered.domain' {{ .DISCOVERED_VALUES_FILE }} PASSWORD: sh: cat {{ .PASSWORD_FILE }} - JUMPHOST_EXT_IP: - sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short - JUMPHOST_NODEPORT: - sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' MASTER_NODE_NAME: sh: kubectl get node -l node.deckhouse.io/group=master -o jsonpath="{.items[0].metadata.name}" @@ -200,14 +196,7 @@ tasks: echo "Host cluster master node: {{ .MASTER_NODE_NAME }}" echo "Namespace: {{ .NAMESPACE }}" echo "OS User: {{ .DEFAULT_USER }}" - echo "Bastion: user@{{ .JUMPHOST_EXT_IP }}:{{ .JUMPHOST_NODEPORT }}" echo vms: kubectl -n {{ .NAMESPACE }} get vm echo "Grafana URL https://grafana.{{ .NAMESPACE }}.{{ .DOMAIN }}" echo "Default user/password admin@deckhouse.io/{{ .PASSWORD}}" - - install: - cmds: - - task: infra-deploy - - task: dhctl-bootstrap - - task: show-connection-info From 3ec3770b7cf985ff8510105a7fae9878ec050202 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 19:50:10 +0300 Subject: [PATCH 40/71] bump helm lib to 1.65.2 Signed-off-by: Nikita Korolev --- .helmignore | 4 ++++ Chart.yaml | 2 +- Taskfile.yaml | 2 +- charts/deckhouse_lib_helm-1.55.1.tgz | Bin 26935 -> 0 bytes 
charts/deckhouse_lib_helm-1.65.2.tgz | Bin 0 -> 29485 bytes requirements.lock | 6 +++--- templates/custom-certificate.yaml | 10 +++++++--- 7 files changed, 16 insertions(+), 8 deletions(-) delete mode 100644 charts/deckhouse_lib_helm-1.55.1.tgz create mode 100644 charts/deckhouse_lib_helm-1.65.2.tgz diff --git a/.helmignore b/.helmignore index 4bfefaf86a..a08f283f48 100644 --- a/.helmignore +++ b/.helmignore @@ -6,6 +6,10 @@ images lib Makefile openapi +test +tests +tmp +tools *.md release.yaml werf*.yaml diff --git a/Chart.yaml b/Chart.yaml index 6edc72f1c5..d80ca1ad08 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -2,5 +2,5 @@ name: virtualization version: 0.0.1 dependencies: - name: deckhouse_lib_helm - version: 1.55.1 + version: 1.65.2 repository: https://deckhouse.github.io/lib-helm diff --git a/Taskfile.yaml b/Taskfile.yaml index 020d59f4f7..9d9d3a66a7 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -26,7 +26,7 @@ includes: dir: ./src/cli vars: - deckhouse_lib_helm_ver: 1.55.1 + deckhouse_lib_helm_ver: 1.65.2 TRIVY_VERSION: 0.55.0 target: "" VALIDATION_FILES: "tools/validation/{main,messages,diff,no_cyrillic,doc_changes}.go" diff --git a/charts/deckhouse_lib_helm-1.55.1.tgz b/charts/deckhouse_lib_helm-1.55.1.tgz deleted file mode 100644 index 73159b8f03a315ecc2a8f65fa75488f44e34298c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26935 zcmV)pK%2iGiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POwycHB0yD2(sF^%NL7S)FC$_9ll9N3v zGarK{(cKuc0R{ji_t;+RJj8jz^CaKGjRZ)L&6ZlSk`ebFM@<4%g+igK02B%-O5V+A z!BL#yaXdrWeEV<)nHVkLJo~TpJUcr(J1?F+Q~%xB*{T0`XK#1!zjmMR>^^_K`}D=` z^Z(k}efIU&PyY+-tO2FSG{q+lLzMbC&W0WWFK7ZZKQ}N4Cg}q{sCok zPzaoHAkaL|pgtc1oTGiv6s!%g$k9H?@R&ii*toK2 zcSp~kk9IdcHz)oxpTPLHea*LHnB&;=Qw&KOzr!RQiF`%Huw?u{eg5L9KmPajc6XjX zj{p03K71I0?XSRF4yR}zeE0w`NwOkEAd<46ghF%J# zMFa5Z6ZmQy3_pF^FaQ}61tS~?Rv>^UK+F)}IZE&ZqZBa2X~7Z%BoMHfK#qo(a6~v3 z_zDdG=76D`A&!VZDZoSlNV0+p#9UNLAGK2Z>C*-Pve*|1B?7${Kt1~}0VXMW4@O`| zLIQ^9ORME<>1!-zVDuBr3dFa-NLj`$fJ9ea}~I9eZ7Vk$)9wF#y|?r1SD=z2~p-hGURD+z9kI4Ajxe(5Sa5B72=j~GJJ&y;yh<`jBW*~ z8js7sJl_d`S>i1DBR-x(f+xuH{H4K|;A!1YYB(qDkec3!=a@)JtM3y$^_`2V_N2%% z>wK7^qw?KHNf9hE0iuNCVMU3H-|5|BBE7a$!bv;1i(vulWR0YL|Y8`_Ay0x|z z(gu3>Z3HGzTC!$kL35hX>7rh}_P(9;f*kO3O2u1_SVg(}wjDWoFBm*z2nzHX37A3w z>vXgiX$M<%)yW)Az22>V`Un{Mt-uVWAZjb4rjojl+x3o~y2Nut3*jOPe02os#mKebYBI3EB}`_c-*;duZ`;A<-+n5Jss z6=sJVAC^*nM$-dAsDOG=)@^043Kc#*AcF|Y%E#oY4ToPQ1m?mhwKfpW_iic<*mNls zs!TVSpOgSCg=CBM0rr{}MD&Dc$}RzWcCZxM*})PZXKHr51dJ}!1_)wOA4H+=jv9Am z?gz@nDH7L|z4P>WTN!5%NXr#Vzr3JMfg0y8x|gb|zgj$pjZ#^J4|-FW>BZ zYcM9%PvsDP+J$f1sv~*vIO~xf#EBh zp(#pBVm;TYSA~E&DOZb%F=7M>#4phNCk>pV`ITB1G^-GL&1p2v=on_B6!U~$A+{J* zb?wrd1K6=e%I2RMr|@4C6A)R~Rr-O)?*(F#B~^{6N!aK${1;`R_n1&N2;0cgS!p`e zdTT6B=zLB|jAg-?(0BrIW{E3Z;b3D1ZfmXa>e07nW#khv%)f?coPP_I?8ut^=XNVzf5pUHr2i+Q4RwLeTSmn(0?7EAp#_ifz!@Q1$ z1{&kF)|=5UY40ny?5$eb#JYyIhMVHH7Ec1Y&R^c%Y|-IJ&-Y&I@Br`;%peER6=DJ> zFdODH9l}CTp1=&TVZu_i1+y_64maH31X~7^sk55>4wG~r{A7Sh&$a}$F*g&={S5$g zOu~l`&Z2>$UjD^%n4o=-elwKc%jIwt1#?=KRIO}da|XvK;|<1crZ^EGf_WY_Rv`uh z@DUJ9QbYvUlQ?*e5=pCoY>LD=n(Qm~x=fIu`1aLqq`v6JAE7*>i#Z}fy=-e~k*81~ zy&SIr>o>1yiuOSy`6i*-B_+3DVvW0ci4oVYht-Euh16a9i_EUx9Fv1Aqt__iH*adJ zY*{ZXc~~*&n`+;14&OJRs^zb}oq74fTLudgY|T(_YC}t;cR{KhbG4-<1GU)@Q?7SA z0;{Wc)r@irw`!HYZMKXJ(klGG}dERWGWLv}jdj5*G z)DNFmHCC0pVkpmW0=ecAL)uFJfVrS-@e0qe*av$z<{toRD<4gbkD+5j&rzcG3Zxfg zcA*Vehl}#bJPfTkoS{V-*qlLb7MY_YqeVJ2fjDd!g;+iSoBbADoroq_z1{Z-SP^*z z5&&IZFT)Iz8A{&qV(x9=>5zsBj*aLUeG54{gJK4v?GlPoFcZ()!LAX_>Kmy 
ze&aK^_xuGf<~}zaMH19p9gEFQqgGhc4rsP-mXPGUumUZgS+&Nf<)w86vV=C%ONcGS z&rXgSyg*XKl~6aR_P=*h3RJvuN$I z#tLDegWNKK4tcRmMjBcxXP7c97IBh6F4dMO@(fk-7x4ubXg)MipTmS;(3F==n7#2H&H8EEDl%I0;oss}R#&5#a_3ULyW#ZZzFf#@2G z*{~ogh#zM7-zZ(iPk)bi2_4214zhfFxOTD}l;E6glI_$9euI1xh& zFxm)>oo=XlJFHHsQ_pPY3@2RrXfM%w@vKHghM;s~l!n8Ugb1w_8I~UJ%P%%h4_+T% zoE;n<50+HQ5aei>7Fy4}e7eJzRA7jp>tTvunqh)o?)9i3*kYI?hH3ipdA+n=$s8sy zhY1#ox-gs9LwZdRV`wtW&=tyF?z!^Z5i(QfyDB=={_xQHza?+mdh1$65e&sl>I7aN zn>NP49h-<;xdU5kCo>ky_W)eMtU&uV&(u>MnwbO zuFOrfEX!Ro8Tze2TyUTCP9uWy8JZ&ovsg~!;1tCfo$^>Z1+fyi2<+}SCS!NcQ(c4# zu-Ud*d&7nPR@(co47*f%gTnIKF57-?OY3i=u5M=Q(PX!>u}-1?6b&BGj`dKmyHbsh zD(MfPl6r3cwCW=VWb5Wm)g9*h=T=%?Y{cq1s}{WasIqRNvbf92M^*Kxs+Ov%emlH9 zG*yQJ3-04OeWX5rZuRBHKlRgIwE_CD6vVDA-nC^|#XeW8Z!g%j%-OQt-5#^8A5R^% zM}gl8b5}TZmkQ|@!My@$|EVgbSHQ(BHB4(*g`;MwGM`=7)Z-9Wcb5WZbRJ!%ThHIcm4%-bo6n{q>=F$}o5N^!IX7f=*@G^1Q7 zh+)1}3;OFto71$y>NEIp#&;`?wMu~H(iQV6OlZUUV*Sp4)n*+xQDsOr848rfe8FRw z&M}Ge5>-H-g(Ubs^k_ha9hKTVtlptdAINMjbck0!T{F5`tre$Eo(guyrqtWtBjFF% zTo3jcf|;28+y6wF@U5Av)t9!|IsaP0>oafE6<2Ka8kOA#oz~+!$^aywAnyphCV(Nx zDFIg(>L3~L13$qvRSAy-WIEfIoM-&HE#~>JAjM3DDlMv1UCxj)Caf8+SdVJF(kIx0 z=1k1b z`>c*Xw*eY{c0q8M=9Ar9pncX=dZ`hv7EnlNo{Pm1X8YiS+kbbjiEgWhLRzS$F+OjW zYoJw>&kA9*;RKlS!wqseu`x`fGqYO$CS5CVV5B@L;>SV;4nAm`)i~^Mn?nwx6;C&G z2X3lZxQ&e`;AQn}Jn=#`>)$SP9$V?E@cr%WDHgM0JWA+%yE@A&=g8a09C;o((6+}J z9dGOS-|bYLBXO#2OO~sseCM=@i4i}i8S;8Tm4$_vQHK9r&-vKwiu9ZP2Iuz^j`u6> z?T0A7+umW)R_$*L0nG9Dj21cH2fswoKQ~;P3x0`m*=jBj5k_07B3Fow)#oV^(UzPK zarNJ8D3Y1{L&ws|zfzPTfuet|jw;C@p7+G357_p>qEBG-0owp(EQaar0BHSP#n|^^ zR1b2|6(V(NJGgE5umz&Lr444SZM3X0rn^n?j@GOTg|aV=wsvD$f*|3T&~#m0(I3F?bXok$1Vbi#+ zffKg>JGk$3;B8H6Kbk(P_hanq6Yxe3bxIJRIy3)-W*NQ4WC~Ofm8jeq&6PDEe*(^=X5+lPSLN{H}xYKkxWOr?}B#u?@_sdk=79vXEA?5PBR$%L}GZqE%&mUSaEIJ~;cRd)hhFYF1p zoFVZ2@g>L|HjGh5uT`r}gC$?CVfIe8xyV(s)maNZ!#PN15KCDMGjAj@EKF|w$pj}@ z4hmT|Est-5D-5MaFQ7n^BD+8WY?l01W^;-JIHTzRK$1G=zQLwjLUtxiRS_Z0)VV%K zEx`#2CRo3Bst} zo>ZqAytk{*NG+V-$#m2?%<=o3i!PrN-QhIiE_~s2w5fr2TW=cs=<|oRK6uC5 zVZ+1lx*dWZg3J9`y~}$u@DGLQ55l^yI3y|~zVW0y`QyG@-L4xbzZ0`wop$>DTJ;vY zx5j1VKCpvxg&RQ%QrT^k?|$w0A(;Jp5!MH9^(!ts45MGs^Jw$$%jQ22o<9hazuI{4 zX6^1FmRnBe9Z*Y5zvQZ0TGBeKy?h9+fq%m10k{Y2-T=M9j_}%cIB&i&yw*L{id)N< z7b?xS-VJ3-SZkku-@T{L_xAkze|NurzPIyu|L=V~Zs@N{RI7u%Vtd(k_>$SZ zo_EzMh&KVyBcfTszUBS2br_{BxW}7mbza`8uKeA&__TT%y7IL8+H%>c?$q!8pQBS3 zsPi@i)%n~4)=!e_-D~N8ahe>y(ejk0sP(2p`?0NJ`-PI_*S4zd7fF_1TgmOZJkl-D z4WR47$VRC)P_NwcZv1ebw!>={t$?n06Ji@RHh=adV)Krc`Nf=}ck@dT?GaVgc_h6M zP~b%V;72H@8C`_#Gi)#1_iEJMcVmKE+!cB23m?5)`?8%qZ;lNVZD&lP%eUY6S^wI7 z)2A)nyyMgT(sjG%+Ap2-RO-1klS?julhhvhHEwkcTnPIbE`n=o)WAD<7{=}&^pnk4 zKEO6hdgID2XS7IT%Hr8#%y1f;^#9di!@-JI%_S(G4HJ%|l`Bf?JmEL0Cz0N$BA5S6 z5kU+mHv%xr(ajJ)y&1X!UL)+D9{uomA7I~`c$DwAzMB9cnp1Lt?)xIZ=CMf6V{3Kx zR=BXLd7qbL-B2r{VL4gpeYMxpsm@Jr`Fg(6rDUrASEBijN&Mhn3|5z`-^m46ONNz} z_lwZ|Q#6wb<*h1ki@U8J;^kFMH}3J;s^v~`$8qi!&v;!|RekvE*BQadM9xMQ6=aF; zpz#)zivvdm!(@7q%urg$fpjt@w0wR19wmk3lAjrY)Te^}=b;{U$M187^udsNj;SG_ zIbu+<0GyE8M*!ZT#lAu^D%U!tvmgh2qpZqq)j%Lo97-uXws{)b3Y;M}hs0jR8z`WC z;KtY0oM!Z!qbf>;H)Lb!Dz$x;4ypEK0|nYX)Sc003FB=Bsy=P0(C`;sCUx*9kXo|nXJI&9qxnDQiZ(4_eeP$w<;7)$nwi`R7}jznS?J=x`cj5h*u65) zU8@LYF=@hD-!*~lSC9o==&(rVd54aa3*l^N5!c~bKmrf*2LIHy#6SL)$fF-PfE`-m+XHawP= zKeH1$p!8I-Gsl{)f1mE+2~ zuzi^(kcsi8i6b9VN3Jz>q#yKQZp5eug48_t&^^n2>h->Xxh0=vt8b&`Zc=x+CN`+; zwvE$kP5xrrRmEyO`N$@)tYHsn*8YflqY0?g6`I%W*S8b#hUp7#8*3d6Roy|1tGAoY z?DPQ`>?#<+1EM6&`$pNYL*1&z+qYUw`e#f9~x*fByAj z{O5f`BMa@jI+h2i$H09t|6S?8I3rgs?cK+sn{=TLDzR*8ZBDB*t<_98Im{FqBtwDyRkn4nZ<%)6* zbc%9BQVu8qzbFC#M?zpS<^SBYhhn{ry`0gsNCbD!$4YsgP&VIAX;Mh!YA9}B?N(LI 
zK6Sy+&`gA8VxB$8A%k-y5aUDbar2>~V$i7q>MRS!m5*e7n=;+*d;?oRdhozZ&9=-T zpe=wzQsqJ~cZh@YIHO_?O|!I0t!3lyUvs2+-&i796~ajN*_nk^UT;*&&PKV=>_&;) z0J<|{uvp|r-6MB_rQtBHfl)ljsf)F-ig)sguZ>wvJRRKnf>2$2|0U1?w7Yv>k9I~o zqumHZyD$DS+I#+NWd0kiM3H%UMCTBb)!IVSi(*WWI6OHzU#X^<$vU-fFB_Ia^(l^h z|5B=v1d8g-8gRI=VfL=WHz859H9wj_=&dLuVT6XntGn(WatiiYpKkwehNrU_UO}AM zT;4Zv0hjrI&z?W4`+v`NAJ6~Y%hSXE!#tkBSkPDv)9Rqz0&zfS{hZzQSAZc=Fv9h) z2GIiXNUAwIi^3d8xyly6NLGfnbmZCp{ontmtQ-*vz7eqbnHA&N4{Q3I9P2i32+#Zby~*^`YZbtv;jC6rdS2tA1$&MjeA&U6`cB zcMDw4a59qvTd0-a6pR;OO3N^40J%9R)-|AA%y%(i`a_K_w{gS+l0W; z{C~}%n8~@gE%djL39wTKrc41!fd>*4415r0O&8()aC@ zJDC)8<_K9R&hzDjXse4Td>97C$ zMVdC0{_~yJxVFJB>w#Z0Ivvtn@X;K;A5L(Fz*oR~$ML_>ui%;%St?yWn5NqI%ORIu zpto{bU!1bE4(&a75||=GAyAj*29YQXum_ru>%|vFh^rNUI@f3rUr$I`7lkv z2t1c;TsFS-LuGv#qZ`2V^18+5j3v`OgjPX#E^&LFD^uIa* z4n>(oO3rasFagW-|L)iI^`EEDo;~jW-OJNouY`oaodU?;L*xE*y8qGv)i6}TkTSwb zX@Y_)eW^k_opm6c&7wRtv6&zyY$A~EP^?bs@5@1#*p@oznj5B2ZO=mcwVfH>x;U`o%LH&RTY-IlZkbcWp?0Zoz+i)YD7_;_`++ngcsU;R(Lu#f}69kFLb=+LBEWY*8W2r(+yw&A{}$U^_BBp z8$N;w#95;$Rq%t?ypEQ+M1rV6WfW*?w`8>RP&Y3c?Qnz3S|MU zO40^J?eQ*-;#5&GUo@=5>bkl%f|ri2Ub4~$dq3If!wfsl4do`WcV$=nrbLHox_aow z7DR7Iwm2bg&rhPkz{9P2_$_%y=rsWq+xD%s^7<}J9we#Gwb$h90Bo+F?ZKU12g!AM zS60w#u?|`=E(CC)SjSMsU)d9^fn$VI{J-lQW!CiXEt2#GMijUIAX#rPhh$W2fX0q(m*od0V z7$0NRuf~IEow9HzFkO~c0Ug~Zd{;#Md+nO#>hsovql)6Ki61Ir!uoA#}4#h#jDZfz2yg+R}rMkZbJB!f&^pA=8AfEi5?B=%cMc$K>+ z7#Z7t0<=m)U^2H%cDZhj_4+9|pWVEpm!Y}a?!6<)ulidZrBlUpc%9A5y&iR0vZOI* zA1Zrgv0^f1i1Wh?a^8*El(ea29Yy3E@B7d^&v#?pViAgr_dC*19|01H$T;9LwZ*An zC=+c!uV5vD#N3Gl)+gt^+>;W&?^jZ$+)CfmQjiDcL(|bUpRLPSXX;wD3e6X>-WpmO zk_^n1%CX)EQ87DRamu>1X7?{<`^mxaxsnektV2zYL(ZVq3QTO6L=}&!nquCXn5F)T zwT;uuqLWEqJ5p>8v_$f_K=ZgUJk2CwxR`>As+bo&ix6EKy=9T7QUeG1y( zlz!m|>zV3a4J1=+zU!oPaWYY;!fvuZ`!EL^L&)5O57u= z)*mjP-uGH0FqwjJMw550091bA|6iR`@`$VtlMgpKUl6=PIAjm{X?;8a z6Izh8xB9P8{|?Q-jTNoC?7PfH%xeasCl1Gy+{TvNo91t7{WWXg)phDEwd%U`>RoEq z4&A!sI?!9_&4%XeR;4%4g~6?d)fA(9#ro~_;)Kz;IVQ0}M1W7j?@&SBUVB8E2hn{v zjcvGA@)+xKy1KSnk70LZXMOfPAjhki{4i0Eg5BFD-i?HQyPkFWp9 z6qEOM{MK%yOWZqRhDH?j!N-0^!>w%P$dB}Y(Gi#}$tulCb&n^QA*h0NH_>Q10zX__ zUJRsyQTkwj^H^W1Rq!M*wEmaKZokQS1L6o}T(2i@2N;tvmr(qW_;iZ{+{ld%FAR z|KH1Fbk~z7Prz$DHMa!r?v8f%MteJZJNw`q&FK{aVum@Gp)5yC>ou5V01FNZX*oE6 z6M>j6mC*Sd3&0ReW+(;cGwCSOH75N+ttWLZ zB6+u+>-_QxFb7QPIgYPTwouKRVCe@#l7iISJdF`|j#g-l{fx`3?%|zP%#}XF{+jV zwfa_Tbq+zjHcMTyBqZrR_$A`AXbVKcMEM0hE64#qrR1Dav9E8qsouZki0y-&>c#hr7P)?-5xk}a(U<)XopTQjegL2@ zx+STYcTp|vQfy(^+{>O8(EssO`vkd8H z&!0ZuSN}c^>H8hhD0vrWm=y2hcXHf&99KkYRm67tWRMp4(xBtLTFa5t_zW@5OO#kof z*6;t?*?YYI@qV7p*xTBEb(yW87Kap7*KXhm*nEdcdcr|u(nv{_W02C5|+c7+oJ zCcmYm&s3mBQgcS2o(FfEi_IsT*xPQS<)<=&w=51SpWGyHIcAKqKmF-XpwAtRD#eQ- z0{kQTlK&$DITQjhB6~x3m)9wOin=4pZ*o(l&(6n*#Qi?K^nZQDZmscOdwWlxHT3`2 zdyny7_wuZu|CcS@>O@qPW!T->*-`&=n1nN$UijNE)qP-GE$W}qR4ZnavrM8=dw5eu zi@cYW=u7x;4cEWFmE<*I9n?ON^E$Bged6IJ;GsIQ+w8}N!-tS8~-HCx%()D>(a4^RN z&taxEu(6zAm*=bm)lm@(4M@xoI4>#GSB}t$dM)Lw4D`Mbkfe~My`<*Wd|J2Uz$~NJ zXAEEA3{BB7Phh6vTB~@T1mg6^?qebtQx^r18Zm9g$ZwnDjb?lQI z)UgV(U2uWs+DG6>g`TaE>k=W52!TWhDuhZ)=}Z#3;>GbRiCdXKD6rD_i(6W>o~>QG zYC|x6L$ZZbZQqqhOB=M@^TlUBAsMuMB(H%wFOd|&+iB0e~JeR3M=wIze&4n_a2l~-HW-jzP<*Rpl1 zw$I+7?dIDR%2>_@rSr8?->&Scz%28|f|`WpDD8Ev*g6~ez%1CZ`T4=CSD#%nVo1^$ zrfCGKfM|UIM z>0w3#++^dwZH@Xrz#`w-_RGWFUb!WQeusYF&DNDaca+t`efq`!nG>#elmGMSi|38} zZ@bSP946hA&4O%R9|FVU8@SdFc~pTQI!C4L`>%HpenYBK`)#dz=7RIL3R)} z>~TmG-2SwF5^Ip{hx9dxCL^jY(|}RW{jsM(gkiKMhrZK0gB2dPd-h-I1F=qYyG>(( z-sg~TOo_@{7t_fklij!f((H~^5Y2H_=dhtHrROh6MvHW4@3tSpe-{izjn@k(TiSeo z#q${oRk%vh$iL%f2Dwi{ODwxdDfrrCN%iidS<>{E$`CF)N9$bX&<)N#B^*nRaU}fS 
ziOkT@dfU6G++=GhK@W9u!N>q_#`!yBg&C7#p3p1A79+Ez?2T!k+32^MK`Doc&-Om8 z_mf#-J~Z%$CU zK282rO@qpTULr3m>nev?!3AP|&C@rRMro$o`U?IU96V8}Au9#8GicmoMH$#sr&T($ z3xTMekZ3Rfo3a(R0ilU#6PnhFgXX<`>lc>p%c$z}BAYkCwNoxB1GNh9cR0@)hdJ-S z{x9?Y2o{uKGL5GUP9TAC3i)hI<>aNeBiQZ#?e2cv$p8LqXXn|Y|92lx5C5;x^RZGY zDuINYPpTwq)HxFPSGBH_V)i%iOHwd~i2C))tA4(A1$=?0rB?XqM`0 zLwE%Srs=K^ec;e<{NEgk*_N?SrJ+;HD5LK)IyN^1d7O#1zZ(6ESGl(=4cNIPMqkw! zARTf62DMx>R%Uh08WG67n9;$eTI@_Xy4XGlaHy820Z8gpW(J=+o2{#C8ADU_{#;i* z$g)>hASV5YfBZu+<%`H%QyE1afMh$p7Y;S@|AvwKX4H5St2f;Uc!m?y#M6T@`gY7^ z@T;3>hTchHCohCoXJDL>fU@V9OfyvJv1Tep54r7pM|1{lQ-|A$xyZ9Sk#dn|U6Nk5 z`@xI6$g@X@e^?U#1uw=&nDKHsqrT+fj)jI0whsoY^nD&lU>Az-tpmcZV5xfvV4h3L z%QvR~9FRN8h&4^^-k|KW?WoC!^|V%v{fFOJzqJ~?CeNb!2nvFK8|dxb+q>G1h#C*Y zU<%OY{@rJPu9K9`&9yEczM4>O8wA73!`{BRi`{-f$L$?zJOP_MR|7w)WyC0FbdJOf z6?~XTg)*@F`RnWPsFRZc;as)<+tlI+L{$gtW#UMy#0Y#*ehE(Xh`Or<0u8#7lg@5$ z($QlIRjtb0Y9dv$zTRxAF;xmJ$ya`YH`(#&`%V|~Yx+)#t^L&0{_w%W>=gcLyMmE3 zn!_m+D3vg(Y%{~^!!W8fv!(SfL95H)LzBM3r%xNh;cx@y*rWz_vSO;W=iQBWn56sQ zdo8$SLypRJY|N2>DHL#j1DIsL={G93c$nIrSxZZSF^h23K=oJ08zU-`)7yxSIbv6O zhkD`H;O}r=6(1&)m@L(@=3r1yWz2Jw$fjul(b+bQuz2YGz1^Aga|6WO9 zc?}D8U`Nxhc43y!pzT!I6i4zt0)ZHTYyl#(KKsT6byk)3sk6$&(Lf@l=Nc z*pEJ)2^45b*`l4>cPD5a?5l)^GCajnU^K`MvALmG1TwYuUjMCzO3YL;r~Y98%hrEi zH17X-`r!6fO zZ}m=2%M$8>1cm~QYhO0r^XWWSwZeW?m!hjg>8=tm&vh01|F2nI`dicMN6b8$fp*tw0pauAzJ zDN5Dtrs=t@Ey0YKAFbuK$tgc_LqeRwUbxKWRO*QKdQqTfqk)MT8TeouF)2Rzk(3-q zVmH6!0~HYT=5%uaHaF++{iejmH5$d%KfRQOg)&_RjyWQtUT6^28z2!4noV{(Cip6( z9sS?!%F^r0GUmn8v#acXY)X6fQRLa+aT}y_8>Bw9uO@k;gfc|crEt|E%~`4WWKRVa zX2O%@(l^xwDp&QkQ)zwSOr?+OKXfi=?Y}q zs#$G$R1&p(_hZ$??>Zg9iSCW(v(D1Jvs0i7bZ>z)$@=}xGgseDp}CxsQ#M>-Tjgeg z6m|5>k}Z|hgi1g*MIz?Pm5c?RBU*@cZp(%>tL>a@y9o_%8_hKh-|KIb+_}_bZ%!v94F+5r5t5iA4 z)aA&43kC(6E=HZILkTm1nYk=giHrpiTM(7rC~v=zy1h(OFkx~;k~DI4a0DWoh$fmt zE)a`=?*{3~ruBjg@{Z7JQqm3n5q){}k4W9N6b?)oc_H?W9zQqQ)n|ybZP%2&%P34a zNRjfRDCx;4k26c}FI>khvX&2?9{m_FO+EXtqRGD70+gsT&i#8Zf@Coy`n1tC&eB9} z=`A_Rrd)ql-DdtOapmXwRo8n6uJtrf18eO}eqiZYVu=&kb>)e-8_)#2lUeF4kL`4? 
[binary patch payload omitted: unreadable base85-encoded literal data]
literal 0
HcmV?d00001

diff --git
a/requirements.lock b/requirements.lock index 699cff2720..9bc231778a 100644 --- a/requirements.lock +++ b/requirements.lock @@ -1,6 +1,6 @@ dependencies: - name: deckhouse_lib_helm repository: https://deckhouse.github.io/lib-helm - version: 1.55.1 -digest: sha256:5bdef3964d2672b8ff290f32e22569bc502e040e4e70274cab1762f27d9982e0 -generated: "2025-05-30T11:27:37.094721+03:00" + version: 1.65.2 +digest: sha256:0a3df4956c94972240fc04384db4ac243a9946a74beec719595d9e5961c4e5f8 +generated: "2025-12-22T19:44:03.319626+03:00" diff --git a/templates/custom-certificate.yaml b/templates/custom-certificate.yaml index 1e84723039..96586e3b38 100644 --- a/templates/custom-certificate.yaml +++ b/templates/custom-certificate.yaml @@ -4,10 +4,11 @@ {{- /* Usage: {{ include "helm_lib_module_https_copy_custom_certificate" (list . "namespace" "secret_name_prefix") }} */ -}} {{- /* Renders secret with [custom certificate](https://deckhouse.io/products/kubernetes-platform/documentation/v1/deckhouse-configure-global.html#parameters-modules-https-customcertificate) */ -}} {{- /* in passed namespace with passed prefix */ -}} +{{/* {{- define "override_until_fixed::helm_lib_module_https_copy_custom_certificate" -}} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $namespace := index . 1 -}} {{- /* Namespace */ -}} - {{- $secret_name_prefix := index . 2 -}} {{- /* Secret name prefix */ -}} + {{- $context := index . 0 -}} {{- / Template context with .Values, .Chart, etc / -}} + {{- $namespace := index . 1 -}} {{- / Namespace / -}} + {{- $secret_name_prefix := index . 2 -}} {{- / Secret name prefix / -}} {{- $mode := include "helm_lib_module_https_mode" $context -}} {{- if eq $mode "CustomCertificate" -}} {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} @@ -32,3 +33,6 @@ data: {{- include "override_until_fixed::helm_lib_module_https_copy_custom_certificate" (list . "d8-virtualization" "ingress-tls") -}} +*/}} + +{{- include "helm_lib_module_https_copy_custom_certificate" (list . 
"d8-virtualization" "ingress-tls") -}} From bcf4e557f051f14090cdc8131d95b858af368581 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 20:11:25 +0300 Subject: [PATCH 41/71] static: fix loop, fix repodt Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 12 ++++++------ .github/workflows/e2e-reusable-pipeline.yml | 20 ++++++-------------- 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index d0c8cc3ba6..b93fdd0a0f 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -207,9 +207,9 @@ jobs: parse_summary "$ceph_summary" "ceph" else # Save to json files - cat > /tmp/ceph.json << 'EOF' - ${{ needs.e2e-ceph.outputs.e2e-summary }} - EOF + cat > /tmp/ceph.json <<'EOF' + ${{ needs.e2e-ceph.outputs.e2e-summary }} + EOF if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then parse_summary "$(cat /tmp/ceph.json)" "ceph" @@ -224,9 +224,9 @@ jobs: parse_summary "$replicated_summary" "replicated" else # Save to json files - cat > /tmp/replicated.json << 'EOF' - ${{ needs.e2e-replicated.outputs.e2e-summary }} - EOF + cat > /tmp/replicated.json <<'EOF' + ${{ needs.e2e-replicated.outputs.e2e-summary }} + EOF if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then parse_summary "$(cat /tmp/replicated.json)" "replicated" diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index e0997bc7c7..f5657ca912 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -510,14 +510,10 @@ jobs: for i in $(seq 1 $count); do echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" - linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running" 2>/dev/null) - csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running" 2>/dev/null) - webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running" 2>/dev/null) + linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running" | tr -d '\n' || echo 0) + csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running" | tr -d '\n' || echo 0) + webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running" | tr -d '\n' || echo 0) - # linstor_node=$((linstor_node)) - # csi_node=$((csi_node)) - # webhooks=$((webhooks)) - echo "[INFO] Check if sds-replicated pods are ready" if [[ ${linstor_node} -ge ${workers} && ${csi_node} -ge ${workers} && ${webhooks} -ge 1 ]]; then echo "[SUCCESS] sds-replicated-volume is ready" @@ -710,13 +706,9 @@ jobs: for i in $(seq 1 $count); do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mgr.*Running" 2>/dev/null) - ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mon.*Running" 2>/dev/null) - ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-osd.*Running" 2>/dev/null) - - # ceph_mgr=$((ceph_mgr)) - # ceph_mon=$((ceph_mon)) - # ceph_osd=$((ceph_osd)) + ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mgr.*Running" | tr -d '\n' || echo 0) + ceph_mon=$(kubectl -n d8-operator-ceph get pods 
2>/dev/null | grep -c "ceph-mon.*Running" | tr -d '\n' || echo 0) + ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-osd.*Running" | tr -d '\n' || echo 0) echo "[INFO] check if ceph pods are ready" if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then From b361f0e87a9f1732a1472b56da6fd436e26ac49f Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 22 Dec 2025 22:15:07 +0300 Subject: [PATCH 42/71] static: update count of attempt for sds Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index f5657ca912..27eb590868 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -476,7 +476,7 @@ jobs: return 0 fi - echo "[INFO] Waiting 10s for sds-replicated-volume to be ready (attempt ${i}/${count})" + echo "[INFO] Waiting 10s for sds-replicated-volume namespace to be ready (attempt ${i}/${count})" if (( i % 5 == 0 )); then echo "[INFO] Show namespaces sds-replicated-volume" kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" @@ -501,15 +501,15 @@ jobs: } sds_pods_ready() { - local count=60 + local count=100 local linstor_node local csi_node local webhooks local workers=$(kubectl get nodes -o name | grep -c worker) workers=$((workers)) + echo "[INFO] Wait while linstor-node csi-node webhooks pods are ready" for i in $(seq 1 $count); do - echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running" | tr -d '\n' || echo 0) csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running" | tr -d '\n' || echo 0) webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running" | tr -d '\n' || echo 0) @@ -519,8 +519,9 @@ jobs: echo "[SUCCESS] sds-replicated-volume is ready" return 0 fi - - echo "[INFO] Waiting 10s for sds-replicated-volume to be ready (attempt ${i}/${count})" + + echo "[WARNING] Not all pods are ready, linstor_node=${linstor_node}, csi_node=${csi_node}, webhooks=${webhooks}" + echo "[INFO] Waiting 10s for pods to be ready (attempt ${i}/${count})" if (( i % 5 == 0 )); then echo "[DEBUG] Get pods" kubectl -n d8-sds-replicated-volume get pods || true @@ -556,12 +557,11 @@ jobs: exit 1 fi - bdexists=false - for i in $(seq 1 $count); do blockdevices=$(kubectl get blockdevice -o name | wc -l || echo 0) if [ $blockdevices -ge $workers ]; then - bdexists=true + echo "[SUCCESS] Blockdevices is greater or equal to $workers" + kubectl get blockdevice return 0 fi @@ -699,7 +699,7 @@ jobs: } ceph_ready() { - local count=60 + local count=90 local ceph_mgr local ceph_mon local ceph_osd From 3420ed25a2401142e994335624b6ab7f2f9ce7a7 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Tue, 23 Dec 2025 15:19:44 +0300 Subject: [PATCH 43/71] fix scanInterval, add if for dvcr Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 2 +- templates/dvcr/deployment.yaml | 4 +++- test/dvp-static-cluster/storage/ceph/01-mc.yaml | 2 +- test/dvp-static-cluster/storage/sds-replicated/mc.yaml | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 
27eb590868..87e9ebf840 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -841,7 +841,7 @@ jobs: name: virtualization spec: imageTag: ${{ env.VIRTUALIZATION_TAG }} - scanInterval: 10m + scanInterval: 10h EOF echo "[INFO] Show module config virtualization info" diff --git a/templates/dvcr/deployment.yaml b/templates/dvcr/deployment.yaml index fdf58a9fc4..bcbafdc7e0 100644 --- a/templates/dvcr/deployment.yaml +++ b/templates/dvcr/deployment.yaml @@ -9,7 +9,8 @@ cpu: 50m memory: 15Mi {{- end }} -{{- if eq (include "dvcr.isEnabled" . ) "true"}} +{{- if eq (include "dvcr.isEnabled" . ) "true" }} +{{- if ne (dig "dvcr" "serviceIP" "" .Values.virtualization.internal) "" }} {{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} --- apiVersion: autoscaling.k8s.io/v1 @@ -149,3 +150,4 @@ spec: serviceAccountName: dvcr {{- end }} +{{- end }} diff --git a/test/dvp-static-cluster/storage/ceph/01-mc.yaml b/test/dvp-static-cluster/storage/ceph/01-mc.yaml index 1f711e905e..070f2f4a56 100644 --- a/test/dvp-static-cluster/storage/ceph/01-mc.yaml +++ b/test/dvp-static-cluster/storage/ceph/01-mc.yaml @@ -23,4 +23,4 @@ metadata: name: csi-ceph spec: imageTag: main - scanInterval: 10m + scanInterval: 10h diff --git a/test/dvp-static-cluster/storage/sds-replicated/mc.yaml b/test/dvp-static-cluster/storage/sds-replicated/mc.yaml index b7d6abda99..cd4a8a2871 100644 --- a/test/dvp-static-cluster/storage/sds-replicated/mc.yaml +++ b/test/dvp-static-cluster/storage/sds-replicated/mc.yaml @@ -21,7 +21,7 @@ metadata: name: sds-node-configurator spec: imageTag: main - scanInterval: 15s + scanInterval: 10h --- apiVersion: deckhouse.io/v1alpha2 kind: ModulePullOverride @@ -29,4 +29,4 @@ metadata: name: sds-replicated-volume spec: imageTag: main - scanInterval: 15s + scanInterval: 10h From a38b8b03d6b1b19c7e9a7c4d99fec66435584ee4 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Tue, 23 Dec 2025 16:16:23 +0300 Subject: [PATCH 44/71] static: fix cond ceph and sds Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 16 ++++++++-------- templates/dvcr/deployment.yaml | 2 -- templates/dvcr/rbac-for-us.yaml | 2 +- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 87e9ebf840..38fa51c4b2 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -505,14 +505,14 @@ jobs: local linstor_node local csi_node local webhooks - local workers=$(kubectl get nodes -o name | grep -c worker) + local workers=$(kubectl get nodes -o name | grep worker | wc -l) workers=$((workers)) echo "[INFO] Wait while linstor-node csi-node webhooks pods are ready" for i in $(seq 1 $count); do - linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "linstor-node.*Running" | tr -d '\n' || echo 0) - csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "csi-node.*Running" | tr -d '\n' || echo 0) - webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep -c "webhooks.*Running" | tr -d '\n' || echo 0) + linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep "linstor-node.*Running" | wc -l) + csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep "csi-node.*Running" | wc -l) + webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep "webhooks.*Running" | wc -l) echo 
"[INFO] Check if sds-replicated pods are ready" if [[ ${linstor_node} -ge ${workers} && ${csi_node} -ge ${workers} && ${webhooks} -ge 1 ]]; then @@ -549,7 +549,7 @@ jobs: blockdevices_ready() { local count=60 - workers=$(kubectl get nodes -o name | grep -c worker) + workers=$(kubectl get nodes -o name | grep worker | wc -l) workers=$((workers)) if [[ $workers -eq 0 ]]; then @@ -706,9 +706,9 @@ jobs: for i in $(seq 1 $count); do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mgr.*Running" | tr -d '\n' || echo 0) - ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-mon.*Running" | tr -d '\n' || echo 0) - ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep -c "ceph-osd.*Running" | tr -d '\n' || echo 0) + ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep "ceph-mgr.*Running" | wc -l) + ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep "ceph-mon.*Running" | wc -l) + ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep "ceph-osd.*Running" | wc -l) echo "[INFO] check if ceph pods are ready" if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then diff --git a/templates/dvcr/deployment.yaml b/templates/dvcr/deployment.yaml index bcbafdc7e0..7acd1c93b0 100644 --- a/templates/dvcr/deployment.yaml +++ b/templates/dvcr/deployment.yaml @@ -10,7 +10,6 @@ memory: 15Mi {{- end }} {{- if eq (include "dvcr.isEnabled" . ) "true" }} -{{- if ne (dig "dvcr" "serviceIP" "" .Values.virtualization.internal) "" }} {{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} --- apiVersion: autoscaling.k8s.io/v1 @@ -150,4 +149,3 @@ spec: serviceAccountName: dvcr {{- end }} -{{- end }} diff --git a/templates/dvcr/rbac-for-us.yaml b/templates/dvcr/rbac-for-us.yaml index 84e4b6deab..db2f9f9a0c 100644 --- a/templates/dvcr/rbac-for-us.yaml +++ b/templates/dvcr/rbac-for-us.yaml @@ -48,7 +48,7 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: d8:virtualization:dvcr + name: d8:{{ .Chart.Name }}:dvcr subjects: - kind: ServiceAccount name: dvcr From 0b558b94b23cd7e3d6389bf8d0d09f19903c2596 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Tue, 23 Dec 2025 21:49:51 +0300 Subject: [PATCH 45/71] static: fix cond ceph linstor Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 35 ++++++++++++++------- templates/dvcr/deployment.yaml | 2 ++ 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 38fa51c4b2..74bdcbf2cc 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -405,7 +405,6 @@ jobs: - name: Upload kubeconfig config uses: actions/upload-artifact@v4 id: artifact-upload-kubeconfig - if: always() with: name: generated-files-kubeconfig-${{ inputs.storage_type }} path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/kube-config @@ -510,9 +509,9 @@ jobs: echo "[INFO] Wait while linstor-node csi-node webhooks pods are ready" for i in $(seq 1 $count); do - linstor_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep "linstor-node.*Running" | wc -l) - csi_node=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep "csi-node.*Running" | wc -l) - webhooks=$(kubectl -n d8-sds-replicated-volume get pods 2>/dev/null | grep "webhooks.*Running" | wc -l) + linstor_node=$(kubectl -n d8-sds-replicated-volume get pods | 
grep "linstor-node.*Running" | wc -l || echo 0) + csi_node=$(kubectl -n d8-sds-replicated-volume get pods | grep "csi-node.*Running" | wc -l || echo 0) + webhooks=$(kubectl -n d8-sds-replicated-volume get pods | grep "webhooks.*Running" | wc -l || echo 0) echo "[INFO] Check if sds-replicated pods are ready" if [[ ${linstor_node} -ge ${workers} && ${csi_node} -ge ${workers} && ${webhooks} -ge 1 ]]; then @@ -529,6 +528,7 @@ jobs: d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" echo " " fi + sleep 10 done echo "[ERROR] sds-replicated-volume is not ready after ${count} attempts" @@ -706,9 +706,9 @@ jobs: for i in $(seq 1 $count); do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep "ceph-mgr.*Running" | wc -l) - ceph_mon=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep "ceph-mon.*Running" | wc -l) - ceph_osd=$(kubectl -n d8-operator-ceph get pods 2>/dev/null | grep "ceph-osd.*Running" | wc -l) + ceph_mgr=$(kubectl -n d8-operator-ceph get pods | grep "ceph-mgr.*Running" | wc -l || echo 0) + ceph_mon=$(kubectl -n d8-operator-ceph get pods | grep "ceph-mon.*Running" | wc -l || echo 0) + ceph_osd=$(kubectl -n d8-operator-ceph get pods | grep "ceph-osd.*Running" | wc -l || echo 0) echo "[INFO] check if ceph pods are ready" if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then @@ -915,14 +915,27 @@ jobs: kubectl get ns d8-virtualization || true echo "[DEBUG] Show pods in namespace d8-virtualization" kubectl -n d8-virtualization get pods || true - echo "[DEBUG] Show dvcr pod yaml" + echo "[DEBUG] Show dvcr info" + echo "::group::📦 dvcr pod describe" + kubectl -n d8-virtualization describe pod -l app=dvcr || true + echo "::endgroup::" + echo " " echo "::group::📦 dvcr pod yaml" kubectl -n d8-virtualization get pods -l app=dvcr -o yaml || true echo "::endgroup::" - echo "[DEBUG] Show dvcr pod describe" - echo "::group::📦 dvcr pod describe" - kubectl -n d8-virtualization describe pod -l app=dvcr || true + echo " " + echo "::group::📦 dvcr deployment yaml" + kubectl -n d8-virtualization get deployment -l app=dvcr -o yaml || true + echo "::endgroup::" + echo " " + echo "::group::📦 dvcr deployment describe" + kubectl -n d8-virtualization describe deployment -l app=dvcr || true + echo "::endgroup::" + echo " " + echo "::group::📦 dvcr service yaml" + kubectl -n d8-virtualization get service -l app=dvcr -o yaml || true echo "::endgroup::" + echo " " echo "[DEBUG] Show pvc in namespace d8-virtualization" kubectl get pvc -n d8-virtualization || true echo "[DEBUG] Show storageclasses" diff --git a/templates/dvcr/deployment.yaml b/templates/dvcr/deployment.yaml index 7acd1c93b0..bcbafdc7e0 100644 --- a/templates/dvcr/deployment.yaml +++ b/templates/dvcr/deployment.yaml @@ -10,6 +10,7 @@ memory: 15Mi {{- end }} {{- if eq (include "dvcr.isEnabled" . 
) "true" }} +{{- if ne (dig "dvcr" "serviceIP" "" .Values.virtualization.internal) "" }} {{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} --- apiVersion: autoscaling.k8s.io/v1 @@ -149,3 +150,4 @@ spec: serviceAccountName: dvcr {{- end }} +{{- end }} From fb459c4d6bca6c1e80380987cdbb39403ebe904d Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 10:13:26 +0300 Subject: [PATCH 46/71] format yaml Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 2 +- .github/workflows/e2e-reusable-pipeline.yml | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index b93fdd0a0f..2584ee8873 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -198,7 +198,7 @@ jobs: replicated_bootstrap_failed="${{ needs.e2e-replicated.outputs.bootstrap-failed }}" ceph_run_id="${{ needs.e2e-ceph.outputs.workflow-run-id }}" replicated_run_id="${{ needs.e2e-replicated.outputs.workflow-run-id }}" - + # Handle ceph storage type if [ "$ceph_bootstrap_failed" == "true" ]; then echo "[INFO] Bootstrap failed for ceph, generating failure summary" diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 74bdcbf2cc..53d14c944a 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -800,7 +800,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Check kubeconfig run: | echo "[INFO] Configure kube config" @@ -878,7 +877,7 @@ jobs: sleep 10 done } - + virtualization_ready() { local count=90 local virtualization_status @@ -952,7 +951,7 @@ jobs: echo " " echo "[INFO] Waiting for Virtualization module to be ready" d8_queue - + virtualization_ready e2e-test: name: E2E test (${{ inputs.storage_type }}) @@ -988,7 +987,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Install kubectl CLI uses: azure/setup-kubectl@v4 @@ -1026,7 +1024,7 @@ jobs: echo "summary=$SUMMARY" >> $GITHUB_OUTPUT summary_file_name="e2e_summary_${{ inputs.storage_type }}_$DATE.json" echo "report_file_name=${summary_file_name}" >> $GITHUB_OUTPUT - + echo $SUMMARY > "${summary_file_name}" - name: Upload summary test results @@ -1061,7 +1059,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Install Task uses: arduino/setup-task@v2 with: From bbdad1fab305d6c79f318f5d2661bfd625811a1c Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 10:28:21 +0300 Subject: [PATCH 47/71] change script ci Signed-off-by: Nikita Korolev --- test/e2e/scripts/task_run_ci.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/scripts/task_run_ci.sh b/test/e2e/scripts/task_run_ci.sh index 276b961ed6..373e83c923 100755 --- a/test/e2e/scripts/task_run_ci.sh +++ b/test/e2e/scripts/task_run_ci.sh @@ -27,15 +27,15 @@ else go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT fi -EXIT_CODE="${PIPESTATUS[0]}" +# EXIT_CODE="${PIPESTATUS[0]}" RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!") -if [[ $RESULT == FAIL!* || $EXIT_CODE -ne "0" ]]; then +if [[ $RESULT == FAIL!* ]]; then RESULT_STATUS=":x: FAIL!" elif [[ $RESULT == SUCCESS!* ]]; then RESULT_STATUS=":white_check_mark: SUCCESS!" 
else RESULT_STATUS=":question: UNKNOWN" - EXIT_CODE=1 + # EXIT_CODE=1 fi PASSED=$(echo "$RESULT" | grep -oP "\d+(?= Passed)") @@ -70,4 +70,4 @@ SUMMARY=$(jq -n \ echo "$SUMMARY" echo "SUMMARY=$(echo "$SUMMARY" | jq -c .)" >> $GITHUB_ENV -exit $EXIT_CODE +# exit $EXIT_CODE From 4b102714ca02101ea01f93c7b79e9305a6fd1f92 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 11:37:36 +0300 Subject: [PATCH 48/71] change virtualization tag to pr1757 Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 4 ++-- templates/dvcr/deployment.yaml | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 2584ee8873..dc99752170 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -38,7 +38,7 @@ jobs: storage_type: ceph nested_storageclass_name: nested-ceph-pool-r2-csi-rbd branch: main - virtualization_tag: main + virtualization_tag: pr1757 deckhouse_tag: main pod_subnet_cidr: 10.88.0.0/16 service_subnet_cidr: 10.92.0.0/16 @@ -58,7 +58,7 @@ jobs: storage_type: replicated nested_storageclass_name: nested-thin-r1 branch: main - virtualization_tag: main + virtualization_tag: pr1757 deckhouse_tag: main pod_subnet_cidr: 10.89.0.0/16 service_subnet_cidr: 10.93.0.0/16 diff --git a/templates/dvcr/deployment.yaml b/templates/dvcr/deployment.yaml index bcbafdc7e0..7acd1c93b0 100644 --- a/templates/dvcr/deployment.yaml +++ b/templates/dvcr/deployment.yaml @@ -10,7 +10,6 @@ memory: 15Mi {{- end }} {{- if eq (include "dvcr.isEnabled" . ) "true" }} -{{- if ne (dig "dvcr" "serviceIP" "" .Values.virtualization.internal) "" }} {{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} --- apiVersion: autoscaling.k8s.io/v1 @@ -150,4 +149,3 @@ spec: serviceAccountName: dvcr {{- end }} -{{- end }} From 7308eaa0c5a90701e5fb446b7f2dc742232b4a14 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 13:57:01 +0300 Subject: [PATCH 49/71] fix cond Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 53d14c944a..b4a85b54c0 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -509,9 +509,9 @@ jobs: echo "[INFO] Wait while linstor-node csi-node webhooks pods are ready" for i in $(seq 1 $count); do - linstor_node=$(kubectl -n d8-sds-replicated-volume get pods | grep "linstor-node.*Running" | wc -l || echo 0) - csi_node=$(kubectl -n d8-sds-replicated-volume get pods | grep "csi-node.*Running" | wc -l || echo 0) - webhooks=$(kubectl -n d8-sds-replicated-volume get pods | grep "webhooks.*Running" | wc -l || echo 0) + linstor_node=$(kubectl -n d8-sds-replicated-volume get pods | grep "linstor-node.*Running" | wc -l || true) + csi_node=$(kubectl -n d8-sds-replicated-volume get pods | grep "csi-node.*Running" | wc -l || true) + webhooks=$(kubectl -n d8-sds-replicated-volume get pods | grep "webhooks.*Running" | wc -l || true) echo "[INFO] Check if sds-replicated pods are ready" if [[ ${linstor_node} -ge ${workers} && ${csi_node} -ge ${workers} && ${webhooks} -ge 1 ]]; then @@ -558,7 +558,7 @@ jobs: fi for i in $(seq 1 $count); do - blockdevices=$(kubectl get blockdevice -o name | wc -l || echo 0) + blockdevices=$(kubectl get blockdevice -o name | wc -l || true) if [ $blockdevices -ge $workers ]; then echo 
"[SUCCESS] Blockdevices is greater or equal to $workers" kubectl get blockdevice @@ -706,9 +706,9 @@ jobs: for i in $(seq 1 $count); do echo "[INFO] Check ceph pods, mon mgr osd" - ceph_mgr=$(kubectl -n d8-operator-ceph get pods | grep "ceph-mgr.*Running" | wc -l || echo 0) - ceph_mon=$(kubectl -n d8-operator-ceph get pods | grep "ceph-mon.*Running" | wc -l || echo 0) - ceph_osd=$(kubectl -n d8-operator-ceph get pods | grep "ceph-osd.*Running" | wc -l || echo 0) + ceph_mgr=$(kubectl -n d8-operator-ceph get pods | grep "ceph-mgr.*Running" | wc -l || true) + ceph_mon=$(kubectl -n d8-operator-ceph get pods | grep "ceph-mon.*Running" | wc -l || true) + ceph_osd=$(kubectl -n d8-operator-ceph get pods | grep "ceph-osd.*Running" | wc -l || true) echo "[INFO] check if ceph pods are ready" if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then From 7ef1ff8144b3ffc78da5ecfba5186b2693ab2935 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 13:57:23 +0300 Subject: [PATCH 50/71] change virt tag to main Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index dc99752170..2584ee8873 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -38,7 +38,7 @@ jobs: storage_type: ceph nested_storageclass_name: nested-ceph-pool-r2-csi-rbd branch: main - virtualization_tag: pr1757 + virtualization_tag: main deckhouse_tag: main pod_subnet_cidr: 10.88.0.0/16 service_subnet_cidr: 10.92.0.0/16 @@ -58,7 +58,7 @@ jobs: storage_type: replicated nested_storageclass_name: nested-thin-r1 branch: main - virtualization_tag: pr1757 + virtualization_tag: main deckhouse_tag: main pod_subnet_cidr: 10.89.0.0/16 service_subnet_cidr: 10.93.0.0/16 From 4702363da717396c2b34ae9f24bb51ae9c8f30d5 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 16:16:28 +0300 Subject: [PATCH 51/71] ci: update ci for multiple storage type report, add junit xml report file Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 112 ++++++++++------ .github/workflows/e2e-reusable-pipeline.yml | 121 ++++++++++++++---- .../storage/sds-replicated/rsc-gen.sh | 5 - 3 files changed, 165 insertions(+), 73 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 2584ee8873..adacd855ad 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -40,8 +40,6 @@ jobs: branch: main virtualization_tag: main deckhouse_tag: main - pod_subnet_cidr: 10.88.0.0/16 - service_subnet_cidr: 10.92.0.0/16 default_user: cloud go_version: "1.24.6" e2e_timeout: "3h" @@ -60,8 +58,6 @@ jobs: branch: main virtualization_tag: main deckhouse_tag: main - pod_subnet_cidr: 10.89.0.0/16 - service_subnet_cidr: 10.93.0.0/16 default_user: cloud go_version: "1.24.6" e2e_timeout: "3h" @@ -78,6 +74,8 @@ jobs: - e2e-ceph - e2e-replicated if: ${{ always()}} + env: + STORAGE_TYPES: '["ceph", "replicated"]' steps: - name: Send results to channel run: | @@ -193,45 +191,71 @@ jobs: DATE=$(date +"%Y-%m-%d") COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n\n" - # Check bootstrap status and generate summaries - ceph_bootstrap_failed="${{ needs.e2e-ceph.outputs.bootstrap-failed }}" - replicated_bootstrap_failed="${{ needs.e2e-replicated.outputs.bootstrap-failed }}" - ceph_run_id="${{ needs.e2e-ceph.outputs.workflow-run-id }}" - replicated_run_id="${{ 
needs.e2e-replicated.outputs.workflow-run-id }}" - - # Handle ceph storage type - if [ "$ceph_bootstrap_failed" == "true" ]; then - echo "[INFO] Bootstrap failed for ceph, generating failure summary" - ceph_summary=$(generate_bootstrap_failure_summary "ceph" "$ceph_run_id") - echo "$ceph_summary" > /tmp/ceph.json - parse_summary "$ceph_summary" "ceph" - else - # Save to json files - cat > /tmp/ceph.json <<'EOF' - ${{ needs.e2e-ceph.outputs.e2e-summary }} - EOF - - if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then - parse_summary "$(cat /tmp/ceph.json)" "ceph" + readarray -t storage_types < <(echo "$STORAGE_TYPES" | jq -r '.[]') + + for storage in "${storage_types[@]}"; do + job_name="e2e-$storage" + + bootstrap_key="bootstrap_failed_${storage}" + summary_key="summary_${storage}" + run_id_key="run_id_${storage}" + + if [ "${!bootstrap_key}" == "true" ]; then + echo "[INFO] Bootstrap failed for $storage" + failed_summary=$(generate_bootstrap_failure_summary "$storage" "${!run_id_key}") + parse_summary "$failed_summary" "$storage" + else + json_content="${!summary_key}" + if [ -n "$json_content" ] && [ "$json_content" != '""' ] && [ "$json_content" != '{}' ]; then + parse_summary "$json_content" "$storage" + else + echo "[WARN] No valid summary for $storage" + csi=$(get_csi_name "$storage") + markdown_table+="| $csi | :warning: NO REPORT | 0 | 0 | 0 | 0 | — | — | — |\n" + fi fi - fi + done + + # # Check bootstrap status and generate summaries + # ceph_bootstrap_failed="${{ needs.e2e-ceph.outputs.bootstrap-failed }}" + # replicated_bootstrap_failed="${{ needs.e2e-replicated.outputs.bootstrap-failed }}" + # ceph_run_id="${{ needs.e2e-ceph.outputs.workflow-run-id }}" + # replicated_run_id="${{ needs.e2e-replicated.outputs.workflow-run-id }}" - # Handle replicated storage type - if [ "$replicated_bootstrap_failed" == "true" ]; then - echo "[INFO] Bootstrap failed for replicated, generating failure summary" - replicated_summary=$(generate_bootstrap_failure_summary "replicated" "$replicated_run_id") - echo "$replicated_summary" > /tmp/replicated.json - parse_summary "$replicated_summary" "replicated" - else - # Save to json files - cat > /tmp/replicated.json <<'EOF' - ${{ needs.e2e-replicated.outputs.e2e-summary }} - EOF + # # Handle ceph storage type + # if [ "$ceph_bootstrap_failed" == "true" ]; then + # echo "[INFO] Bootstrap failed for ceph, generating failure summary" + # ceph_summary=$(generate_bootstrap_failure_summary "ceph" "$ceph_run_id") + # echo "$ceph_summary" > /tmp/ceph.json + # parse_summary "$ceph_summary" "ceph" + # else + # # Save to json files + # cat > /tmp/ceph.json <<'EOF' + # ${{ needs.e2e-ceph.outputs.e2e-summary }} + # EOF - if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then - parse_summary "$(cat /tmp/replicated.json)" "replicated" - fi - fi + # if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then + # parse_summary "$(cat /tmp/ceph.json)" "ceph" + # fi + + # fi + + # # Handle replicated storage type + # if [ "$replicated_bootstrap_failed" == "true" ]; then + # echo "[INFO] Bootstrap failed for replicated, generating failure summary" + # replicated_summary=$(generate_bootstrap_failure_summary "replicated" "$replicated_run_id") + # echo "$replicated_summary" > /tmp/replicated.json + # parse_summary "$replicated_summary" "replicated" + # else + # # Save to json files + # cat > 
/tmp/replicated.json <<'EOF' + # ${{ needs.e2e-replicated.outputs.e2e-summary }} + # EOF + + # if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then + # parse_summary "$(cat /tmp/replicated.json)" "replicated" + # fi + # fi COMBINED_SUMMARY+="${markdown_table}\n" @@ -243,3 +267,11 @@ jobs: fi env: LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }} + + bootstrap_failed_ceph: ${{ needs.e2e-ceph.outputs.bootstrap-failed }} + summary_ceph: ${{ needs.e2e-ceph.outputs.e2e-summary }} + run_id_ceph: ${{ needs.e2e-ceph.outputs.workflow-run-id }} + + bootstrap_failed_replicated: ${{ needs.e2e-replicated.outputs.bootstrap-failed }} + summary_replicated: ${{ needs.e2e-replicated.outputs.e2e-summary }} + run_id_replicated: ${{ needs.e2e-replicated.outputs.workflow-run-id }} diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index b4a85b54c0..517155ae3d 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -75,7 +75,7 @@ on: BOOTSTRAP_DEV_PROXY: required: true outputs: - e2e-summary: + e2e-report: description: "E2E test results" value: ${{ jobs.e2e-test.outputs.report-summary }} bootstrap-failed: @@ -504,7 +504,7 @@ jobs: local linstor_node local csi_node local webhooks - local workers=$(kubectl get nodes -o name | grep worker | wc -l) + local workers=$(kubectl get nodes -o name | grep worker | wc -l || true) workers=$((workers)) echo "[INFO] Wait while linstor-node csi-node webhooks pods are ready" @@ -611,9 +611,13 @@ jobs: chmod +x rsc-gen.sh ./rsc-gen.sh - echo "[INFO] Enshure that nested storageclasses are created" - kubectl get storageclass | grep nested || echo "[WARNING] No nested storageclasses" - echo "[SUCCESS] Done" + echo "[INFO] Show existing storageclasses" + if ! 
kubectl get storageclass | grep -q nested; then + echo "[WARNING] No nested storageclasses" + else + kubectl get storageclass | grep nested + echo "[SUCCESS] Done" + fi - name: Configure ceph storage if: ${{ inputs.storage_type == 'ceph' }} working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/ceph @@ -631,14 +635,22 @@ jobs: if [ -n "$queue_count" ] && [ "$queue_count" = "0" ]; then echo "[SUCCESS] Queue is clear" return 0 - else + fi + + echo "[INFO] Wait until queues are empty ${i}/${count}" + if (( i % 5 == 0 )); then echo "[INFO] Show queue list" d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" + echo " " fi - echo "[INFO] Wait until queues are empty ${i}/${count}" - kubectl get ns | grep ceph || echo "Namespaces ceph not found" - echo " " + if (( i % 10 == 0 )); then + echo "[INFO] deckhouse logs" + echo "::group::📝 deckhouse logs" + d8 s logs | tail -n 100 + echo "::endgroup::" + echo " " + fi sleep 10 done } @@ -819,10 +831,6 @@ jobs: spec: enabled: true settings: - # https: - # certManager: - # clusterIssuerName: selfsigned - # mode: CertManager dvcr: storage: persistentVolumeClaim: @@ -856,22 +864,27 @@ jobs: d8_queue() { local count=90 + local queue_count for i in $(seq 1 $count) ; do queue_count=$(d8_queue_list) if [ -n "$queue_count" ] && [ "$queue_count" = "0" ]; then - echo "[SUCCESS] Queues are empty" + echo "[SUCCESS] Queue is clear" return 0 - else - echo "[INFO] Show queue list" - d8 s queue list | head -n25 || echo "Failed to retrieve list queue" fi echo "[INFO] Wait until queues are empty ${i}/${count}" - if (( i % 5 == 0 )); then + echo "[INFO] Show queue list" + d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" echo " " + fi + + if (( i % 10 == 0 )); then + echo "[INFO] deckhouse logs" + echo "::group::📝 deckhouse logs" d8 s logs | tail -n 100 + echo "::endgroup::" echo " " fi sleep 10 @@ -957,7 +970,7 @@ jobs: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 outputs: - report-summary: ${{ steps.e2e-summary.outputs.summary }} + report-summary: ${{ steps.e2e-report.outputs.summary }} needs: - bootstrap - configure-storage @@ -1010,30 +1023,82 @@ jobs: kubectl get vmclass/generic -o json | jq 'del(.status) | del(.metadata) | .metadata = {"name":"generic-for-e2e","annotations":{"virtualmachineclass.virtualization.deckhouse.io/is-default-class":"true"}} ' | kubectl create -f - - name: Run E2E - id: e2e-summary + id: e2e-report env: TIMEOUT: ${{ inputs.e2e_timeout }} CSI: ${{ inputs.storage_type }} + STORAGE_CLASS_NAME: ${{ inputs.nested_storageclass_name }} working-directory: ./test/e2e/ run: | + GINKGO_RESULT=$(mktemp) + DATE=$(date +"%Y-%m-%d") + START_TIME=$(date +"%H:%M:%S") + summary_file_name_junit="e2e_summary_${CSI}_${DATE}.xml" + summary_file_name_json="e2e_summary_${CSI}_${DATE}.json" + if [[ "${{ inputs.storage_type }}" == "replicated" ]]; then export SKIP_IMMEDIATE_SC_CHECK="yes" fi - STORAGE_CLASS_NAME=${{ inputs.nested_storageclass_name }} FOCUS="VirtualMachineConfiguration" task run:ci -v LABELS="Slow" + + FOCUS="VirtualMachineConfiguration" \ + go tool ginkgo \ + --focus "$FOCUS" \ + -v --race --timeout=$TIMEOUT \ + --junit-report=$summary_file_name_junit | tee $GINKGO_RESULT + + RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!") + if [[ $RESULT == FAIL!* ]]; then + RESULT_STATUS=":x: FAIL!" + elif [[ $RESULT == SUCCESS!* ]]; then + RESULT_STATUS=":white_check_mark: SUCCESS!" 
+ else + RESULT_STATUS=":question: UNKNOWN" + fi + PASSED=$(echo "$RESULT" | grep -oP "\d+(?= Passed)") + FAILED=$(echo "$RESULT" | grep -oP "\d+(?= Failed)") + PENDING=$(echo "$RESULT" | grep -oP "\d+(?= Pending)") + SKIPPED=$(echo "$RESULT" | grep -oP "\d+(?= Skipped)") + + SUMMARY=$(jq -n \ + --arg csi "$CSI" \ + --arg date "$DATE" \ + --arg startTime "$START_TIME" \ + --arg branch "$GITHUB_REF_NAME" \ + --arg status "$RESULT_STATUS" \ + --argjson passed "$PASSED" \ + --argjson failed "$FAILED" \ + --argjson pending "$PENDING" \ + --argjson skipped "$SKIPPED" \ + --arg link "$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" \ + '{ + CSI: $csi, + Date: $date, + StartTime: $startTime, + Branch: $branch, + Status: $status, + Passed: $passed, + Failed: $failed, + Pending: $pending, + Skipped: $skipped, + Link: $link + }' + ) + + echo "$SUMMARY" echo "summary=$SUMMARY" >> $GITHUB_OUTPUT - summary_file_name="e2e_summary_${{ inputs.storage_type }}_$DATE.json" - echo "report_file_name=${summary_file_name}" >> $GITHUB_OUTPUT - - echo $SUMMARY > "${summary_file_name}" + echo "report_file_name=${summary_file_name_json}" >> $GITHUB_OUTPUT + echo $SUMMARY > "${summary_file_name_json}" - name: Upload summary test results uses: actions/upload-artifact@v4 - id: e2e-summary-artifact + id: e2e-report-artifact if: always() with: - name: ${{ steps.e2e-summary.outputs.report_file_name }} - path: test/e2e/e2e_summary_${{ inputs.storage_type }}.json + name: ${{ steps.e2e-report.outputs.report_file_name }} + path: | + test/e2e/${{ steps.e2e-report.outputs.report_file_name }} + test/e2e/*.xml if-no-files-found: ignore undeploy-cluster: diff --git a/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh b/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh index 7d93443620..a3a03ea707 100644 --- a/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh +++ b/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh @@ -80,8 +80,3 @@ kubectl apply -f ${manifest} DEFAULT_STORAGE_CLASS=nested-thin-r1 kubectl patch mc global --type='json' -p='[{"op": "replace", "path": "/spec/settings/defaultClusterStorageClass", "value": "'"$DEFAULT_STORAGE_CLASS"'"}]' - -sleep 2 -echo "Showing Storage Classes" -kubectl get storageclass -echo " " From e157255003d0b4f2deef78c327950b2b90884149 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 17:20:19 +0300 Subject: [PATCH 52/71] add sc for e2e Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 517155ae3d..5ddd9984c0 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1040,6 +1040,8 @@ jobs: export SKIP_IMMEDIATE_SC_CHECK="yes" fi + cp -a legacy/testdata /tmp/testdata + FOCUS="VirtualMachineConfiguration" \ go tool ginkgo \ --focus "$FOCUS" \ @@ -1093,7 +1095,6 @@ jobs: - name: Upload summary test results uses: actions/upload-artifact@v4 id: e2e-report-artifact - if: always() with: name: ${{ steps.e2e-report.outputs.report_file_name }} path: | From a6c1d393c2f15fffd9ed0c2bf5053e58c6840684 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 19:26:35 +0300 Subject: [PATCH 53/71] fix run e2e Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git 
a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 5ddd9984c0..acf3c6538e 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1042,19 +1042,22 @@ jobs: cp -a legacy/testdata /tmp/testdata + set +e FOCUS="VirtualMachineConfiguration" \ go tool ginkgo \ - --focus "$FOCUS" \ + --focus "VirtualMachineConfiguration" \ -v --race --timeout=$TIMEOUT \ --junit-report=$summary_file_name_junit | tee $GINKGO_RESULT + GINKGO_EXIT_CODE=$? + set -e RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!") if [[ $RESULT == FAIL!* ]]; then - RESULT_STATUS=":x: FAIL!" + RESULT_STATUS=":x: FAIL!" elif [[ $RESULT == SUCCESS!* ]]; then - RESULT_STATUS=":white_check_mark: SUCCESS!" + RESULT_STATUS=":white_check_mark: SUCCESS!" else - RESULT_STATUS=":question: UNKNOWN" + RESULT_STATUS=":question: UNKNOWN" fi PASSED=$(echo "$RESULT" | grep -oP "\d+(?= Passed)") @@ -1088,7 +1091,7 @@ jobs: ) echo "$SUMMARY" - echo "summary=$SUMMARY" >> $GITHUB_OUTPUT + echo "summary=$(echo "$SUMMARY" | jq -c .)" >> $GITHUB_OUTPUT echo "report_file_name=${summary_file_name_json}" >> $GITHUB_OUTPUT echo $SUMMARY > "${summary_file_name_json}" From 430b9d17af0edb0bba7447dc1761daf5f1718236 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 20:35:10 +0300 Subject: [PATCH 54/71] fix e2e report Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 55 +++++---------------- .github/workflows/e2e-reusable-pipeline.yml | 6 ++- 2 files changed, 15 insertions(+), 46 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index adacd855ad..f62bc98f47 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -181,6 +181,7 @@ jobs: } # Initialize markdown table + echo "[INFO] Generate markdown table" markdown_table="" header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" separator="|---|---|---|---|---|---|---|---|---|\n" @@ -191,8 +192,11 @@ jobs: DATE=$(date +"%Y-%m-%d") COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n\n" + echo "[INFO] Get storage types" readarray -t storage_types < <(echo "$STORAGE_TYPES" | jq -r '.[]') + echo "[INFO] Storage types: ${storage_types[@]}" + echo "[INFO] Generate summary for each storage type" for storage in "${storage_types[@]}"; do job_name="e2e-$storage" @@ -200,13 +204,15 @@ jobs: summary_key="summary_${storage}" run_id_key="run_id_${storage}" - if [ "${!bootstrap_key}" == "true" ]; then + if [ "${bootstrap_key}" == "true" ]; then echo "[INFO] Bootstrap failed for $storage" - failed_summary=$(generate_bootstrap_failure_summary "$storage" "${!run_id_key}") + failed_summary=$(generate_bootstrap_failure_summary "$storage" "${run_id_key}") parse_summary "$failed_summary" "$storage" else - json_content="${!summary_key}" + echo "[INFO] Get summary for $storage" + json_content="${summary_key}" if [ -n "$json_content" ] && [ "$json_content" != '""' ] && [ "$json_content" != '{}' ]; then + echo "[INFO] Parse summary for $storage" parse_summary "$json_content" "$storage" else echo "[WARN] No valid summary for $storage" @@ -216,52 +222,13 @@ jobs: fi done - # # Check bootstrap status and generate summaries - # ceph_bootstrap_failed="${{ needs.e2e-ceph.outputs.bootstrap-failed }}" - # replicated_bootstrap_failed="${{ needs.e2e-replicated.outputs.bootstrap-failed }}" - # ceph_run_id="${{ needs.e2e-ceph.outputs.workflow-run-id }}" - # 
replicated_run_id="${{ needs.e2e-replicated.outputs.workflow-run-id }}" - - # # Handle ceph storage type - # if [ "$ceph_bootstrap_failed" == "true" ]; then - # echo "[INFO] Bootstrap failed for ceph, generating failure summary" - # ceph_summary=$(generate_bootstrap_failure_summary "ceph" "$ceph_run_id") - # echo "$ceph_summary" > /tmp/ceph.json - # parse_summary "$ceph_summary" "ceph" - # else - # # Save to json files - # cat > /tmp/ceph.json <<'EOF' - # ${{ needs.e2e-ceph.outputs.e2e-summary }} - # EOF - - # if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then - # parse_summary "$(cat /tmp/ceph.json)" "ceph" - # fi - - # fi - - # # Handle replicated storage type - # if [ "$replicated_bootstrap_failed" == "true" ]; then - # echo "[INFO] Bootstrap failed for replicated, generating failure summary" - # replicated_summary=$(generate_bootstrap_failure_summary "replicated" "$replicated_run_id") - # echo "$replicated_summary" > /tmp/replicated.json - # parse_summary "$replicated_summary" "replicated" - # else - # # Save to json files - # cat > /tmp/replicated.json <<'EOF' - # ${{ needs.e2e-replicated.outputs.e2e-summary }} - # EOF - - # if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then - # parse_summary "$(cat /tmp/replicated.json)" "replicated" - # fi - # fi - + echo "[INFO] Combined summary" COMBINED_SUMMARY+="${markdown_table}\n" echo -e "$COMBINED_SUMMARY" # Send to channel if webhook is configured + echo "[INFO] Send to webhook" if [ -n "$LOOP_WEBHOOK_URL" ]; then curl --request POST --header 'Content-Type: application/json' --data "{\"text\": \"${COMBINED_SUMMARY}\"}" "$LOOP_WEBHOOK_URL" fi diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index acf3c6538e..80fe2f6581 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1101,9 +1101,11 @@ jobs: with: name: ${{ steps.e2e-report.outputs.report_file_name }} path: | - test/e2e/${{ steps.e2e-report.outputs.report_file_name }} - test/e2e/*.xml + test/e2e/e2e_summary_*.json + test/e2e/e2e_summary_*.xml + test/e2e/*junit*.xml if-no-files-found: ignore + retention-days: 1 undeploy-cluster: name: Undeploy cluster (${{ inputs.storage_type }}) From 121106746445e06a04feaaf8fcc0c1674d50fe9c Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 22:21:21 +0300 Subject: [PATCH 55/71] add additional checks for virt Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 80fe2f6581..26eeff0b52 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -901,6 +901,7 @@ jobs: echo "[SUCCESS] Virtualization module is ready" kubectl get modules virtualization kubectl -n d8-virtualization get pods + kubectl get vmclass return 0 fi @@ -966,6 +967,11 @@ jobs: d8_queue virtualization_ready + + echo "[INFO] Checking Virtualization module deployments" + kubectl -n d8-virtualization wait --for=condition=Available deploy --all --timeout 300s + echo "[INFO] Checking virt-handler pods" + kubectl -n d8-virtualization wait --for=condition=Ready pods -l kubevirt.internal.virtualization.deckhouse.io=virt-handler e2e-test: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 @@ -1095,6 
+1101,9 @@ jobs: echo "report_file_name=${summary_file_name_json}" >> $GITHUB_OUTPUT echo $SUMMARY > "${summary_file_name_json}" + echo "[INFO] Exit code: $GINKGO_EXIT_CODE" + exit $GINKGO_EXIT_CODE + - name: Upload summary test results uses: actions/upload-artifact@v4 id: e2e-report-artifact From 4d53978c3b0bcafb1c0ec7c44c47c5e78c7f4c46 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Wed, 24 Dec 2025 23:06:35 +0300 Subject: [PATCH 56/71] add info about storage setup Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 22 ++++++++++++++++++--- .github/workflows/e2e-reusable-pipeline.yml | 16 +++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index f62bc98f47..a484a409af 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -96,9 +96,10 @@ jobs: } # Generate bootstrap failure summary - generate_bootstrap_failure_summary() { + generate_stage_failure_summary() { local storage_type=$1 local workflow_run_id=$2 + local stage=$3 local csi=$(get_csi_name "$storage_type") local date=$(date +"%Y-%m-%d") local time=$(date +"%H:%M:%S") @@ -106,12 +107,18 @@ jobs: local link="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${workflow_run_id}" # Create JSON summary for bootstrap failure + if [ "$stage" == "cluster_bootstrap" ]; then + local status=":x: BOOTSTRAP CLUSTER FAILED" + elif [ "$stage" == "storage_setup" ]; then + local status=":x: STORAGE SETUP FAILED" + fi + local summary_json=$(jq -n \ --arg csi "$csi" \ --arg date "$date" \ --arg time "$time" \ --arg branch "$branch" \ - --arg status ":x: BOOTSTRAP CLUSTER FAILED" \ + --arg status "$status" \ --arg link "$link" \ '{CSI: $csi, Date: $date, StartTime: $time, Branch: $branch, Status: $status, Passed: 0, Failed: 0, Pending: 0, Skipped: 0, Link: $link}') @@ -131,6 +138,7 @@ jobs: # Try to parse as JSON (handle both JSON string and already parsed JSON) if ! 
echo "$summary_json" | jq empty 2>/dev/null; then echo "Warning: Invalid JSON for $storage_type: $summary_json" + echo "[DEBUG] json: $summary_json" return fi @@ -203,10 +211,15 @@ jobs: bootstrap_key="bootstrap_failed_${storage}" summary_key="summary_${storage}" run_id_key="run_id_${storage}" + storage_setup_key="storage_setup_failed_${storage}" if [ "${bootstrap_key}" == "true" ]; then echo "[INFO] Bootstrap failed for $storage" - failed_summary=$(generate_bootstrap_failure_summary "$storage" "${run_id_key}") + failed_summary=$(generate_stage_failure_summary "$storage" "${run_id_key}" "cluster_bootstrap") + parse_summary "$failed_summary" "$storage" + elif [ "${storage_setup_key}" == "true" ]; then + echo "[INFO] Storage setup failed for $storage" + failed_summary=$(generate_stage_failure_summary "$storage" "${run_id_key}" "storage_setup") parse_summary "$failed_summary" "$storage" else echo "[INFO] Get summary for $storage" @@ -238,7 +251,10 @@ jobs: bootstrap_failed_ceph: ${{ needs.e2e-ceph.outputs.bootstrap-failed }} summary_ceph: ${{ needs.e2e-ceph.outputs.e2e-summary }} run_id_ceph: ${{ needs.e2e-ceph.outputs.workflow-run-id }} + storage_setup_failed_ceph: ${{ needs.e2e-ceph.outputs.storage-ceph-setup }} bootstrap_failed_replicated: ${{ needs.e2e-replicated.outputs.bootstrap-failed }} summary_replicated: ${{ needs.e2e-replicated.outputs.e2e-summary }} run_id_replicated: ${{ needs.e2e-replicated.outputs.workflow-run-id }} + storage_setup_failed_replicated: ${{ needs.e2e-replicated.outputs.storage-replicated-setup }} + diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 26eeff0b52..f528cd2e6c 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -84,6 +84,12 @@ on: workflow-run-id: description: "Workflow run ID" value: ${{ github.run_id }} + storage-replicated-setup: + description: "Storage replicated setup failed" + value: ${{ jobs.configure-storage.outputs.storage-replicated-setup }} + storage-ceph-setup: + description: "Storage ceph setup failed" + value: ${{ jobs.configure-storage.outputs.storage-ceph-setup }} env: BRANCH: ${{ inputs.branch }} @@ -416,6 +422,9 @@ jobs: name: Configure storage (${{ inputs.storage_type }}) runs-on: ubuntu-latest needs: bootstrap + outputs: + storage-replicated-setup: ${{ steps.storage-replicated-setup.outcome == 'failure' }} + storage-ceph-setup: ${{ steps.storage-ceph-setup.outcome == 'failure' }} steps: - uses: actions/checkout@v4 @@ -459,7 +468,13 @@ jobs: fi done + if $i -eq 3; then + echo "[ERROR] Failed to retrieve nodes." 
+ exit 1 + fi + - name: Configure replicated storage + id: storage-replicated-setup if: ${{ inputs.storage_type == 'replicated' }} working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/sds-replicated run: | @@ -620,6 +635,7 @@ jobs: fi - name: Configure ceph storage if: ${{ inputs.storage_type == 'ceph' }} + id: storage-ceph-setup working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/ceph run: | d8_queue_list() { From 933a530d04cf92db7cea8689252e20b151492717 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Thu, 25 Dec 2025 12:26:43 +0300 Subject: [PATCH 57/71] try use artifact for reports Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 163 ++++++++++++++------ .github/workflows/e2e-reusable-pipeline.yml | 139 ++++++++++++++--- 2 files changed, 233 insertions(+), 69 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index a484a409af..22a6e1e396 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -77,6 +77,17 @@ jobs: env: STORAGE_TYPES: '["ceph", "replicated"]' steps: + - uses: actions/checkout@v4 + + - name: Download E2E report artifacts + uses: actions/download-artifact@v5 + continue-on-error: true + id: download-artifacts + with: + pattern: "e2e-report-*" + path: downloaded-artifacts/ + merge-multiple: false + - name: Send results to channel run: | # Map storage types to CSI names @@ -95,36 +106,77 @@ jobs: esac } - # Generate bootstrap failure summary - generate_stage_failure_summary() { + # Function to load and parse report from artifact + load_report_from_artifact() { local storage_type=$1 - local workflow_run_id=$2 - local stage=$3 + local base_path="downloaded-artifacts/" + + # Find report file - search in all subdirectories matching the storage type pattern + # Artifacts are downloaded with names like "e2e-report-{storage_type}-{run_id}" + # and placed in subdirectories with the same name + local report_file=$(find "$base_path" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1) + + # Alternative: search by artifact directory name pattern + if [ -z "$report_file" ] || [ ! 
-f "$report_file" ]; then + local artifact_dir=$(find "$base_path" -type d -name "e2e-report-${storage_type}-*" 2>/dev/null | head -1) + if [ -n "$artifact_dir" ]; then + report_file=$(find "$artifact_dir" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) + fi + fi + + if [ -n "$report_file" ] && [ -f "$report_file" ]; then + echo "[INFO] Found report file: $report_file" + cat "$report_file" + return 0 + else + echo "[WARN] Report file not found for storage type: $storage_type" + echo "[DEBUG] Searched in: $base_path" + return 1 + fi + } + + # Function to create failure summary JSON (fallback) + create_failure_summary() { + local storage_type=$1 + local stage=$2 + local run_id=$3 local csi=$(get_csi_name "$storage_type") local date=$(date +"%Y-%m-%d") local time=$(date +"%H:%M:%S") local branch="${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" - local link="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${workflow_run_id}" + local link="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${run_id:-${GITHUB_RUN_ID}}" - # Create JSON summary for bootstrap failure - if [ "$stage" == "cluster_bootstrap" ]; then - local status=":x: BOOTSTRAP CLUSTER FAILED" - elif [ "$stage" == "storage_setup" ]; then - local status=":x: STORAGE SETUP FAILED" - fi - - local summary_json=$(jq -n \ + # Map stage to status message + local status_msg + case "$stage" in + "bootstrap") + status_msg=":x: BOOTSTRAP CLUSTER FAILED" + ;; + "storage-setup") + status_msg=":x: STORAGE SETUP FAILED" + ;; + "virtualization-setup") + status_msg=":x: VIRTUALIZATION SETUP FAILED" + ;; + "e2e-test") + status_msg=":x: E2E TEST FAILED" + ;; + *) + status_msg=":question: UNKNOWN" + ;; + esac + + jq -n \ --arg csi "$csi" \ --arg date "$date" \ --arg time "$time" \ --arg branch "$branch" \ - --arg status "$status" \ + --arg status "$status_msg" \ --arg link "$link" \ - '{CSI: $csi, Date: $date, StartTime: $time, Branch: $branch, Status: $status, Passed: 0, Failed: 0, Pending: 0, Skipped: 0, Link: $link}') - - echo "$summary_json" + '{CSI: $csi, Date: $date, StartTime: $time, Branch: $branch, Status: $status, Passed: 0, Failed: 0, Pending: 0, Skipped: 0, Link: $link}' } + # Parse summary JSON and add to table parse_summary() { local summary_json=$1 @@ -206,32 +258,51 @@ jobs: echo "[INFO] Generate summary for each storage type" for storage in "${storage_types[@]}"; do - job_name="e2e-$storage" - - bootstrap_key="bootstrap_failed_${storage}" - summary_key="summary_${storage}" - run_id_key="run_id_${storage}" - storage_setup_key="storage_setup_failed_${storage}" - - if [ "${bootstrap_key}" == "true" ]; then - echo "[INFO] Bootstrap failed for $storage" - failed_summary=$(generate_stage_failure_summary "$storage" "${run_id_key}" "cluster_bootstrap") - parse_summary "$failed_summary" "$storage" - elif [ "${storage_setup_key}" == "true" ]; then - echo "[INFO] Storage setup failed for $storage" - failed_summary=$(generate_stage_failure_summary "$storage" "${run_id_key}" "storage_setup") - parse_summary "$failed_summary" "$storage" - else - echo "[INFO] Get summary for $storage" - json_content="${summary_key}" - if [ -n "$json_content" ] && [ "$json_content" != '""' ] && [ "$json_content" != '{}' ]; then - echo "[INFO] Parse summary for $storage" - parse_summary "$json_content" "$storage" + echo "[INFO] Processing $storage" + + # Try to load report from artifact + structured_report=$(load_report_from_artifact "$storage" 2>/dev/null || true) + + if [ -n "$structured_report" ] && echo "$structured_report" | jq empty 2>/dev/null; 
then + # Extract report data from structured file + report_json=$(echo "$structured_report" | jq -c '.report // empty') + failed_stage=$(echo "$structured_report" | jq -r '.failed_stage // empty') + workflow_run_id=$(echo "$structured_report" | jq -r '.workflow_run_id // empty') + + echo "[INFO] Loaded report for $storage (failed_stage: ${failed_stage}, run_id: ${workflow_run_id})" + + # Validate and parse report + if [ -n "$report_json" ] && [ "$report_json" != "" ] && [ "$report_json" != "null" ]; then + if echo "$report_json" | jq empty 2>/dev/null; then + echo "[INFO] Found valid report for $storage" + parse_summary "$report_json" "$storage" + else + echo "[WARN] Invalid report JSON for $storage, using failed stage info" + # Fallback to failed stage + if [ -n "$failed_stage" ] && [ "$failed_stage" != "" ] && [ "$failed_stage" != "success" ]; then + failed_summary=$(create_failure_summary "$storage" "$failed_stage" "$workflow_run_id") + parse_summary "$failed_summary" "$storage" + else + csi=$(get_csi_name "$storage") + markdown_table+="| $csi | :warning: INVALID REPORT | 0 | 0 | 0 | 0 | — | — | — |\n" + fi + fi else - echo "[WARN] No valid summary for $storage" - csi=$(get_csi_name "$storage") - markdown_table+="| $csi | :warning: NO REPORT | 0 | 0 | 0 | 0 | — | — | — |\n" + # No report in structured file, use failed stage + if [ -n "$failed_stage" ] && [ "$failed_stage" != "" ] && [ "$failed_stage" != "success" ]; then + echo "[INFO] Stage '$failed_stage' failed for $storage" + failed_summary=$(create_failure_summary "$storage" "$failed_stage" "$workflow_run_id") + parse_summary "$failed_summary" "$storage" + else + csi=$(get_csi_name "$storage") + markdown_table+="| $csi | :warning: NO REPORT | 0 | 0 | 0 | 0 | — | — | — |\n" + fi fi + else + # Artifact not found or invalid, show warning + echo "[WARN] Could not load report artifact for $storage" + csi=$(get_csi_name "$storage") + markdown_table+="| $csi | :warning: ARTIFACT NOT FOUND | 0 | 0 | 0 | 0 | — | — | — |\n" fi done @@ -248,13 +319,3 @@ jobs: env: LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }} - bootstrap_failed_ceph: ${{ needs.e2e-ceph.outputs.bootstrap-failed }} - summary_ceph: ${{ needs.e2e-ceph.outputs.e2e-summary }} - run_id_ceph: ${{ needs.e2e-ceph.outputs.workflow-run-id }} - storage_setup_failed_ceph: ${{ needs.e2e-ceph.outputs.storage-ceph-setup }} - - bootstrap_failed_replicated: ${{ needs.e2e-replicated.outputs.bootstrap-failed }} - summary_replicated: ${{ needs.e2e-replicated.outputs.e2e-summary }} - run_id_replicated: ${{ needs.e2e-replicated.outputs.workflow-run-id }} - storage_setup_failed_replicated: ${{ needs.e2e-replicated.outputs.storage-replicated-setup }} - diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index f528cd2e6c..35250e9b3f 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -75,21 +75,9 @@ on: BOOTSTRAP_DEV_PROXY: required: true outputs: - e2e-report: - description: "E2E test results" - value: ${{ jobs.e2e-test.outputs.report-summary }} - bootstrap-failed: - description: "Bootstrap cluster failed" - value: ${{ jobs.bootstrap.outputs.bootstrap-failed }} - workflow-run-id: - description: "Workflow run ID" - value: ${{ github.run_id }} - storage-replicated-setup: - description: "Storage replicated setup failed" - value: ${{ jobs.configure-storage.outputs.storage-replicated-setup }} - storage-ceph-setup: - description: "Storage ceph setup failed" - value: ${{ 
jobs.configure-storage.outputs.storage-ceph-setup }} + artifact-name: + description: "Name of the uploaded artifact with E2E report" + value: ${{ jobs.e2e-test.outputs.artifact-name }} env: BRANCH: ${{ inputs.branch }} @@ -992,7 +980,7 @@ jobs: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 outputs: - report-summary: ${{ steps.e2e-report.outputs.summary }} + artifact-name: ${{ steps.set-artifact-name.outputs.artifact-name }} needs: - bootstrap - configure-storage @@ -1120,11 +1108,126 @@ jobs: echo "[INFO] Exit code: $GINKGO_EXIT_CODE" exit $GINKGO_EXIT_CODE - - name: Upload summary test results + - name: Determine failed stage and prepare report + id: determine-stage + if: always() + run: | + # Get branch name + BRANCH_NAME="${{ github.head_ref || github.ref_name }}" + if [ -z "$BRANCH_NAME" ] || [ "$BRANCH_NAME" == "refs/heads/" ]; then + BRANCH_NAME="${{ github.ref_name }}" + fi + + # Function to create failure summary JSON + create_failure_summary() { + local stage=$1 + local status_msg=$2 + local csi="${{ inputs.storage_type }}" + local date=$(date +"%Y-%m-%d") + local start_time=$(date +"%H:%M:%S") + local branch="$BRANCH_NAME" + local link="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + + jq -n \ + --arg csi "$csi" \ + --arg date "$date" \ + --arg startTime "$start_time" \ + --arg branch "$branch" \ + --arg status "$status_msg" \ + --arg link "$link" \ + '{ + CSI: $csi, + Date: $date, + StartTime: $startTime, + Branch: $branch, + Status: $status, + Passed: 0, + Failed: 0, + Pending: 0, + Skipped: 0, + Link: $link + }' + } + + # Determine which stage failed and prepare report + REPORT_JSON="" + FAILED_STAGE="" + + if [ "${{ needs.bootstrap.result }}" != "success" ]; then + FAILED_STAGE="bootstrap" + REPORT_JSON=$(create_failure_summary "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED") + elif [ "${{ needs.configure-storage.result }}" != "success" ]; then + FAILED_STAGE="storage-setup" + REPORT_JSON=$(create_failure_summary "storage-setup" ":x: STORAGE SETUP FAILED") + elif [ "${{ needs.configure-virtualization.result }}" != "success" ]; then + FAILED_STAGE="virtualization-setup" + REPORT_JSON=$(create_failure_summary "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED") + elif [ "${{ steps.e2e-report.outcome }}" != "success" ]; then + FAILED_STAGE="e2e-test" + # Use report from e2e-report step if available + E2E_REPORT="${{ steps.e2e-report.outputs.summary }}" + if [ -n "$E2E_REPORT" ] && [ "$E2E_REPORT" != "" ]; then + REPORT_JSON="$E2E_REPORT" + else + REPORT_JSON=$(create_failure_summary "e2e-test" ":x: E2E TEST FAILED") + fi + else + FAILED_STAGE="success" + # Use report from e2e-report step + E2E_REPORT="${{ steps.e2e-report.outputs.summary }}" + if [ -n "$E2E_REPORT" ] && [ "$E2E_REPORT" != "" ]; then + REPORT_JSON="$E2E_REPORT" + else + REPORT_JSON=$(create_failure_summary "success" ":white_check_mark: SUCCESS!") + fi + fi + + # Create structured report file with metadata + REPORT_FILE="e2e_report_${{ inputs.storage_type }}.json" + # Parse REPORT_JSON to ensure it's valid JSON before using it + REPORT_JSON_PARSED=$(echo "$REPORT_JSON" | jq -c .) 
+ jq -n \ + --argjson report "$REPORT_JSON_PARSED" \ + --arg storage_type "${{ inputs.storage_type }}" \ + --arg failed_stage "$FAILED_STAGE" \ + --arg workflow_run_id "${{ github.run_id }}" \ + --arg workflow_run_url "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ + '{ + storage_type: $storage_type, + failed_stage: $failed_stage, + workflow_run_id: $workflow_run_id, + workflow_run_url: $workflow_run_url, + report: $report + }' > "$REPORT_FILE" + + echo "report_file=$REPORT_FILE" >> $GITHUB_OUTPUT + echo "[INFO] Created report file: $REPORT_FILE" + cat "$REPORT_FILE" | jq . + + - name: Upload E2E report artifact + id: upload-artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-report-${{ inputs.storage_type }}-${{ github.run_id }} + path: ${{ steps.determine-stage.outputs.report_file }} + retention-days: 1 + continue-on-error: true + + - name: Set artifact name output + id: set-artifact-name + if: always() + run: | + ARTIFACT_NAME="e2e-report-${{ inputs.storage_type }}-${{ github.run_id }}" + echo "artifact-name=$ARTIFACT_NAME" >> $GITHUB_OUTPUT + echo "[INFO] Artifact name: $ARTIFACT_NAME" + + - name: Upload summary test results (junit/xml) uses: actions/upload-artifact@v4 id: e2e-report-artifact + if: always() with: - name: ${{ steps.e2e-report.outputs.report_file_name }} + name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }} path: | test/e2e/e2e_summary_*.json test/e2e/e2e_summary_*.xml From f6da884ba0d084d699644bbc57b04f4c1adf92f3 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Thu, 25 Dec 2025 17:01:29 +0300 Subject: [PATCH 58/71] change wf, add prepare-report job Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 106 +++++++++++++------- 1 file changed, 71 insertions(+), 35 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 35250e9b3f..71c0f01ddf 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -77,7 +77,7 @@ on: outputs: artifact-name: description: "Name of the uploaded artifact with E2E report" - value: ${{ jobs.e2e-test.outputs.artifact-name }} + value: ${{ jobs.prepare-report.outputs.artifact-name }} env: BRANCH: ${{ inputs.branch }} @@ -979,8 +979,6 @@ jobs: e2e-test: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 - outputs: - artifact-name: ${{ steps.set-artifact-name.outputs.artifact-name }} needs: - bootstrap - configure-storage @@ -1108,9 +1106,35 @@ jobs: echo "[INFO] Exit code: $GINKGO_EXIT_CODE" exit $GINKGO_EXIT_CODE + - name: Upload summary test results (junit/xml) + uses: actions/upload-artifact@v4 + id: e2e-report-artifact + if: always() + with: + name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }} + path: | + test/e2e/e2e_summary_*.json + test/e2e/e2e_summary_*.xml + test/e2e/*junit*.xml + if-no-files-found: ignore + retention-days: 1 + + prepare-report: + name: Prepare E2E report (${{ inputs.storage_type }}) + runs-on: ubuntu-latest + needs: + - bootstrap + - configure-storage + - configure-virtualization + - e2e-test + if: always() + outputs: + artifact-name: ${{ steps.set-artifact-name.outputs.artifact-name }} + steps: + - uses: actions/checkout@v4 + - name: Determine failed stage and prepare report id: determine-stage - if: always() run: | # Get branch name BRANCH_NAME="${{ github.head_ref || github.ref_name }}" @@ -1118,14 +1142,18 @@ jobs: BRANCH_NAME="${{ github.ref_name }}" fi 
- # Function to create failure summary JSON + # Function to create failure summary JSON with proper job URL create_failure_summary() { local stage=$1 local status_msg=$2 + local job_name=$3 local csi="${{ inputs.storage_type }}" local date=$(date +"%Y-%m-%d") local start_time=$(date +"%H:%M:%S") local branch="$BRANCH_NAME" + # Create URL pointing to the failed job in the workflow run + # Format: https://github.com/{owner}/{repo}/actions/runs/{run_id} + # The job name will be visible in the workflow run view local link="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" jq -n \ @@ -1152,33 +1180,46 @@ jobs: # Determine which stage failed and prepare report REPORT_JSON="" FAILED_STAGE="" + FAILED_JOB_NAME="" if [ "${{ needs.bootstrap.result }}" != "success" ]; then FAILED_STAGE="bootstrap" - REPORT_JSON=$(create_failure_summary "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED") + FAILED_JOB_NAME="Bootstrap cluster (${{ inputs.storage_type }})" + REPORT_JSON=$(create_failure_summary "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED" "$FAILED_JOB_NAME") elif [ "${{ needs.configure-storage.result }}" != "success" ]; then FAILED_STAGE="storage-setup" - REPORT_JSON=$(create_failure_summary "storage-setup" ":x: STORAGE SETUP FAILED") + FAILED_JOB_NAME="Configure storage (${{ inputs.storage_type }})" + REPORT_JSON=$(create_failure_summary "storage-setup" ":x: STORAGE SETUP FAILED" "$FAILED_JOB_NAME") elif [ "${{ needs.configure-virtualization.result }}" != "success" ]; then FAILED_STAGE="virtualization-setup" - REPORT_JSON=$(create_failure_summary "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED") - elif [ "${{ steps.e2e-report.outcome }}" != "success" ]; then + FAILED_JOB_NAME="Configure Virtualization (${{ inputs.storage_type }})" + REPORT_JSON=$(create_failure_summary "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED" "$FAILED_JOB_NAME") + elif [ "${{ needs.e2e-test.result }}" != "success" ]; then FAILED_STAGE="e2e-test" - # Use report from e2e-report step if available - E2E_REPORT="${{ steps.e2e-report.outputs.summary }}" - if [ -n "$E2E_REPORT" ] && [ "$E2E_REPORT" != "" ]; then - REPORT_JSON="$E2E_REPORT" + FAILED_JOB_NAME="E2E test (${{ inputs.storage_type }})" + # Try to get report from downloaded artifact or local file + E2E_REPORT_FILE=$(find test/e2e -name "e2e_summary_${{ inputs.storage_type }}_*.json" -type f 2>/dev/null | head -1) + if [ -n "$E2E_REPORT_FILE" ] && [ -f "$E2E_REPORT_FILE" ]; then + # Load existing report and update status if needed + REPORT_JSON=$(cat "$E2E_REPORT_FILE" | jq -c .) 
+ # Check if status indicates failure + CURRENT_STATUS=$(echo "$REPORT_JSON" | jq -r '.Status // ""') + if [[ "$CURRENT_STATUS" != *"FAILED"* ]] && [[ "$CURRENT_STATUS" != *"SUCCESS"* ]]; then + # Update status to failure + REPORT_JSON=$(echo "$REPORT_JSON" | jq -c '.Status = ":x: E2E TEST FAILED"') + fi else - REPORT_JSON=$(create_failure_summary "e2e-test" ":x: E2E TEST FAILED") + REPORT_JSON=$(create_failure_summary "e2e-test" ":x: E2E TEST FAILED" "$FAILED_JOB_NAME") fi else FAILED_STAGE="success" - # Use report from e2e-report step - E2E_REPORT="${{ steps.e2e-report.outputs.summary }}" - if [ -n "$E2E_REPORT" ] && [ "$E2E_REPORT" != "" ]; then - REPORT_JSON="$E2E_REPORT" + FAILED_JOB_NAME="E2E test (${{ inputs.storage_type }})" + # Try to load report from e2e-test job + E2E_REPORT_FILE=$(find test/e2e -name "e2e_summary_${{ inputs.storage_type }}_*.json" -type f 2>/dev/null | head -1) + if [ -n "$E2E_REPORT_FILE" ] && [ -f "$E2E_REPORT_FILE" ]; then + REPORT_JSON=$(cat "$E2E_REPORT_FILE" | jq -c .) else - REPORT_JSON=$(create_failure_summary "success" ":white_check_mark: SUCCESS!") + REPORT_JSON=$(create_failure_summary "success" ":white_check_mark: SUCCESS!" "$FAILED_JOB_NAME") fi fi @@ -1190,11 +1231,13 @@ jobs: --argjson report "$REPORT_JSON_PARSED" \ --arg storage_type "${{ inputs.storage_type }}" \ --arg failed_stage "$FAILED_STAGE" \ + --arg failed_job_name "$FAILED_JOB_NAME" \ --arg workflow_run_id "${{ github.run_id }}" \ --arg workflow_run_url "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ '{ storage_type: $storage_type, failed_stage: $failed_stage, + failed_job_name: $failed_job_name, workflow_run_id: $workflow_run_id, workflow_run_url: $workflow_run_url, report: $report @@ -1202,39 +1245,32 @@ jobs: echo "report_file=$REPORT_FILE" >> $GITHUB_OUTPUT echo "[INFO] Created report file: $REPORT_FILE" + echo "[INFO] Failed stage: $FAILED_STAGE" + echo "[INFO] Failed job: $FAILED_JOB_NAME" cat "$REPORT_FILE" | jq . 
+ - name: Download E2E test results if available + uses: actions/download-artifact@v5 + continue-on-error: true + with: + name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }} + path: test/e2e/ + - name: Upload E2E report artifact id: upload-artifact - if: always() uses: actions/upload-artifact@v4 with: name: e2e-report-${{ inputs.storage_type }}-${{ github.run_id }} path: ${{ steps.determine-stage.outputs.report_file }} retention-days: 1 - continue-on-error: true - name: Set artifact name output id: set-artifact-name - if: always() run: | ARTIFACT_NAME="e2e-report-${{ inputs.storage_type }}-${{ github.run_id }}" echo "artifact-name=$ARTIFACT_NAME" >> $GITHUB_OUTPUT echo "[INFO] Artifact name: $ARTIFACT_NAME" - - name: Upload summary test results (junit/xml) - uses: actions/upload-artifact@v4 - id: e2e-report-artifact - if: always() - with: - name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }} - path: | - test/e2e/e2e_summary_*.json - test/e2e/e2e_summary_*.xml - test/e2e/*junit*.xml - if-no-files-found: ignore - retention-days: 1 - undeploy-cluster: name: Undeploy cluster (${{ inputs.storage_type }}) runs-on: ubuntu-latest From 6edcd58c6311cf989ef2a5dce39b1a53e5fadd70 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Thu, 25 Dec 2025 18:41:46 +0300 Subject: [PATCH 59/71] fix report Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 64 +++++++++++++++++++-- .github/workflows/e2e-reusable-pipeline.yml | 63 +++++++++++--------- 2 files changed, 95 insertions(+), 32 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 22a6e1e396..4feb6061a5 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -82,12 +82,28 @@ jobs: - name: Download E2E report artifacts uses: actions/download-artifact@v5 continue-on-error: true - id: download-artifacts + id: download-artifacts-pattern with: pattern: "e2e-report-*" path: downloaded-artifacts/ merge-multiple: false + - name: Download E2E report artifacts (by name - ceph) + uses: actions/download-artifact@v5 + continue-on-error: true + id: download-artifacts-ceph + with: + name: ${{ needs.e2e-ceph.outputs.artifact-name }} + path: downloaded-artifacts/ceph/ + + - name: Download E2E report artifacts (by name - replicated) + uses: actions/download-artifact@v5 + continue-on-error: true + id: download-artifacts-replicated + with: + name: ${{ needs.e2e-replicated.outputs.artifact-name }} + path: downloaded-artifacts/replicated/ + - name: Send results to channel run: | # Map storage types to CSI names @@ -110,27 +126,63 @@ jobs: load_report_from_artifact() { local storage_type=$1 local base_path="downloaded-artifacts/" + local specific_path="${base_path}${storage_type}/" + + echo "[DEBUG] Searching for report for storage type: $storage_type" + echo "[DEBUG] Base path: $base_path" + echo "[DEBUG] Specific path: $specific_path" - # Find report file - search in all subdirectories matching the storage type pattern - # Artifacts are downloaded with names like "e2e-report-{storage_type}-{run_id}" - # and placed in subdirectories with the same name - local report_file=$(find "$base_path" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1) + # List all downloaded artifacts for debugging + if [ -d "$base_path" ]; then + echo "[DEBUG] Contents of downloaded-artifacts:" + find "$base_path" -type f -name "*.json" 2>/dev/null | head -10 || echo "No JSON files found" + echo "[DEBUG] Directories in downloaded-artifacts:" + find 
"$base_path" -type d -maxdepth 2 2>/dev/null || echo "No directories found" + else + echo "[DEBUG] Base path does not exist" + fi + + # First, try to find in specific storage type directory + local report_file="" + if [ -d "$specific_path" ]; then + echo "[DEBUG] Searching in specific path: $specific_path" + report_file=$(find "$specific_path" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) + if [ -n "$report_file" ] && [ -f "$report_file" ]; then + echo "[INFO] Found report file in specific path: $report_file" + cat "$report_file" + return 0 + fi + fi + + # Second, search in all subdirectories matching the storage type pattern + # Artifacts downloaded by pattern are placed in subdirectories with artifact name + report_file=$(find "$base_path" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1) # Alternative: search by artifact directory name pattern if [ -z "$report_file" ] || [ ! -f "$report_file" ]; then + echo "[DEBUG] First search failed, trying alternative search" local artifact_dir=$(find "$base_path" -type d -name "e2e-report-${storage_type}-*" 2>/dev/null | head -1) + echo "[DEBUG] Found artifact dir: $artifact_dir" if [ -n "$artifact_dir" ]; then report_file=$(find "$artifact_dir" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) + echo "[DEBUG] Found report file in artifact dir: $report_file" fi fi + # Last resort: search for any e2e_report file + if [ -z "$report_file" ] || [ ! -f "$report_file" ]; then + echo "[DEBUG] Alternative search failed, trying broad search" + report_file=$(find "$base_path" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) + echo "[DEBUG] Found report file (broad search): $report_file" + fi + if [ -n "$report_file" ] && [ -f "$report_file" ]; then echo "[INFO] Found report file: $report_file" cat "$report_file" return 0 else echo "[WARN] Report file not found for storage type: $storage_type" - echo "[DEBUG] Searched in: $base_path" + echo "[DEBUG] Final search path: $base_path" return 1 fi } diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 71c0f01ddf..4b270b08ae 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1133,6 +1133,13 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Download E2E test results if available + uses: actions/download-artifact@v5 + continue-on-error: true + with: + name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }} + path: test/e2e/ + - name: Determine failed stage and prepare report id: determine-stage run: | @@ -1177,48 +1184,59 @@ jobs: }' } - # Determine which stage failed and prepare report + # Try to find and load E2E test report + E2E_REPORT_FILE="" REPORT_JSON="" + + # Search for report file in test/e2e directory + E2E_REPORT_FILE=$(find test/e2e -name "e2e_summary_${{ inputs.storage_type }}_*.json" -type f 2>/dev/null | head -1) + + if [ -n "$E2E_REPORT_FILE" ] && [ -f "$E2E_REPORT_FILE" ]; then + echo "[INFO] Found E2E report file: $E2E_REPORT_FILE" + REPORT_JSON=$(cat "$E2E_REPORT_FILE" | jq -c .) + echo "[INFO] Loaded report from file" + echo "$REPORT_JSON" | jq . 
+ fi + + # Determine which stage failed and prepare report FAILED_STAGE="" FAILED_JOB_NAME="" if [ "${{ needs.bootstrap.result }}" != "success" ]; then FAILED_STAGE="bootstrap" FAILED_JOB_NAME="Bootstrap cluster (${{ inputs.storage_type }})" - REPORT_JSON=$(create_failure_summary "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED" "$FAILED_JOB_NAME") + if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then + REPORT_JSON=$(create_failure_summary "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED" "$FAILED_JOB_NAME") + fi elif [ "${{ needs.configure-storage.result }}" != "success" ]; then FAILED_STAGE="storage-setup" FAILED_JOB_NAME="Configure storage (${{ inputs.storage_type }})" - REPORT_JSON=$(create_failure_summary "storage-setup" ":x: STORAGE SETUP FAILED" "$FAILED_JOB_NAME") + if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then + REPORT_JSON=$(create_failure_summary "storage-setup" ":x: STORAGE SETUP FAILED" "$FAILED_JOB_NAME") + fi elif [ "${{ needs.configure-virtualization.result }}" != "success" ]; then FAILED_STAGE="virtualization-setup" FAILED_JOB_NAME="Configure Virtualization (${{ inputs.storage_type }})" - REPORT_JSON=$(create_failure_summary "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED" "$FAILED_JOB_NAME") + if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then + REPORT_JSON=$(create_failure_summary "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED" "$FAILED_JOB_NAME") + fi elif [ "${{ needs.e2e-test.result }}" != "success" ]; then FAILED_STAGE="e2e-test" FAILED_JOB_NAME="E2E test (${{ inputs.storage_type }})" - # Try to get report from downloaded artifact or local file - E2E_REPORT_FILE=$(find test/e2e -name "e2e_summary_${{ inputs.storage_type }}_*.json" -type f 2>/dev/null | head -1) - if [ -n "$E2E_REPORT_FILE" ] && [ -f "$E2E_REPORT_FILE" ]; then - # Load existing report and update status if needed - REPORT_JSON=$(cat "$E2E_REPORT_FILE" | jq -c .) - # Check if status indicates failure + if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then + REPORT_JSON=$(create_failure_summary "e2e-test" ":x: E2E TEST FAILED" "$FAILED_JOB_NAME") + else + # Report exists, but check if status needs to be updated CURRENT_STATUS=$(echo "$REPORT_JSON" | jq -r '.Status // ""') - if [[ "$CURRENT_STATUS" != *"FAILED"* ]] && [[ "$CURRENT_STATUS" != *"SUCCESS"* ]]; then - # Update status to failure + if [[ "$CURRENT_STATUS" != *"FAIL"* ]] && [[ "$CURRENT_STATUS" != *"SUCCESS"* ]]; then + # Update status to failure if not already set REPORT_JSON=$(echo "$REPORT_JSON" | jq -c '.Status = ":x: E2E TEST FAILED"') fi - else - REPORT_JSON=$(create_failure_summary "e2e-test" ":x: E2E TEST FAILED" "$FAILED_JOB_NAME") fi else FAILED_STAGE="success" FAILED_JOB_NAME="E2E test (${{ inputs.storage_type }})" - # Try to load report from e2e-test job - E2E_REPORT_FILE=$(find test/e2e -name "e2e_summary_${{ inputs.storage_type }}_*.json" -type f 2>/dev/null | head -1) - if [ -n "$E2E_REPORT_FILE" ] && [ -f "$E2E_REPORT_FILE" ]; then - REPORT_JSON=$(cat "$E2E_REPORT_FILE" | jq -c .) - else + if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then REPORT_JSON=$(create_failure_summary "success" ":white_check_mark: SUCCESS!" "$FAILED_JOB_NAME") fi fi @@ -1249,13 +1267,6 @@ jobs: echo "[INFO] Failed job: $FAILED_JOB_NAME" cat "$REPORT_FILE" | jq . 
- - name: Download E2E test results if available - uses: actions/download-artifact@v5 - continue-on-error: true - with: - name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }} - path: test/e2e/ - - name: Upload E2E report artifact id: upload-artifact uses: actions/upload-artifact@v4 From c2216084ecac94e8bff8c458c73348a485e29ea4 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Thu, 25 Dec 2025 19:44:11 +0300 Subject: [PATCH 60/71] fix report Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 106 +++++++++++++++++++++---------- 1 file changed, 72 insertions(+), 34 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 4feb6061a5..1620b3de9c 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -104,6 +104,21 @@ jobs: name: ${{ needs.e2e-replicated.outputs.artifact-name }} path: downloaded-artifacts/replicated/ + - name: Verify artifact downloads + run: | + echo "[INFO] Verifying artifact downloads..." + echo "[INFO] Artifact name from e2e-ceph: ${{ needs.e2e-ceph.outputs.artifact-name }}" + echo "[INFO] Artifact name from e2e-replicated: ${{ needs.e2e-replicated.outputs.artifact-name }}" + echo "[INFO] Pattern download result: ${{ steps.download-artifacts-pattern.outcome }}" + echo "[INFO] Ceph download result: ${{ steps.download-artifacts-ceph.outcome }}" + echo "[INFO] Replicated download result: ${{ steps.download-artifacts-replicated.outcome }}" + if [ -d "downloaded-artifacts" ]; then + echo "[INFO] Downloaded artifacts structure:" + find downloaded-artifacts -type f -o -type d | head -20 + else + echo "[WARN] downloaded-artifacts directory does not exist" + fi + - name: Send results to channel run: | # Map storage types to CSI names @@ -123,68 +138,76 @@ jobs: } # Function to load and parse report from artifact + # Outputs: file content to stdout, debug messages to stderr load_report_from_artifact() { local storage_type=$1 local base_path="downloaded-artifacts/" local specific_path="${base_path}${storage_type}/" - echo "[DEBUG] Searching for report for storage type: $storage_type" - echo "[DEBUG] Base path: $base_path" - echo "[DEBUG] Specific path: $specific_path" + echo "[INFO] Searching for report for storage type: $storage_type" >&2 + echo "[DEBUG] Base path: $base_path" >&2 + echo "[DEBUG] Specific path: $specific_path" >&2 # List all downloaded artifacts for debugging if [ -d "$base_path" ]; then - echo "[DEBUG] Contents of downloaded-artifacts:" - find "$base_path" -type f -name "*.json" 2>/dev/null | head -10 || echo "No JSON files found" - echo "[DEBUG] Directories in downloaded-artifacts:" - find "$base_path" -type d -maxdepth 2 2>/dev/null || echo "No directories found" + echo "[DEBUG] Contents of downloaded-artifacts:" >&2 + find "$base_path" -type f -name "*.json" 2>/dev/null | head -20 >&2 || echo "No JSON files found" >&2 + echo "[DEBUG] Directories in downloaded-artifacts:" >&2 + find "$base_path" -type d -maxdepth 3 2>/dev/null >&2 || echo "No directories found" >&2 else - echo "[DEBUG] Base path does not exist" + echo "[WARN] Base path does not exist: $base_path" >&2 + return 1 fi - # First, try to find in specific storage type directory local report_file="" + + # First, try to find in specific storage type directory (when downloaded by name) if [ -d "$specific_path" ]; then - echo "[DEBUG] Searching in specific path: $specific_path" + echo "[DEBUG] Searching in specific path: $specific_path" >&2 report_file=$(find "$specific_path" -name 
"e2e_report_*.json" -type f 2>/dev/null | head -1) if [ -n "$report_file" ] && [ -f "$report_file" ]; then - echo "[INFO] Found report file in specific path: $report_file" + echo "[INFO] Found report file in specific path: $report_file" >&2 cat "$report_file" return 0 fi fi - # Second, search in all subdirectories matching the storage type pattern - # Artifacts downloaded by pattern are placed in subdirectories with artifact name - report_file=$(find "$base_path" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1) - - # Alternative: search by artifact directory name pattern - if [ -z "$report_file" ] || [ ! -f "$report_file" ]; then - echo "[DEBUG] First search failed, trying alternative search" - local artifact_dir=$(find "$base_path" -type d -name "e2e-report-${storage_type}-*" 2>/dev/null | head -1) - echo "[DEBUG] Found artifact dir: $artifact_dir" - if [ -n "$artifact_dir" ]; then - report_file=$(find "$artifact_dir" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) - echo "[DEBUG] Found report file in artifact dir: $report_file" + # Second, search in artifact directories (when downloaded by pattern) + # Pattern downloads create subdirectories named after the artifact + # e.g., downloaded-artifacts/e2e-report-replicated-/e2e_report_replicated.json + echo "[DEBUG] Searching in artifact directories matching pattern: e2e-report-${storage_type}-*" >&2 + local artifact_dir=$(find "$base_path" -type d -name "e2e-report-${storage_type}-*" 2>/dev/null | head -1) + if [ -n "$artifact_dir" ]; then + echo "[DEBUG] Found artifact dir: $artifact_dir" >&2 + report_file=$(find "$artifact_dir" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) + if [ -n "$report_file" ] && [ -f "$report_file" ]; then + echo "[INFO] Found report file in artifact dir: $report_file" >&2 + cat "$report_file" + return 0 fi fi - # Last resort: search for any e2e_report file - if [ -z "$report_file" ] || [ ! 
-f "$report_file" ]; then - echo "[DEBUG] Alternative search failed, trying broad search" - report_file=$(find "$base_path" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) - echo "[DEBUG] Found report file (broad search): $report_file" + # Third, search for file by name pattern anywhere in base_path + echo "[DEBUG] Searching for file: e2e_report_${storage_type}.json" >&2 + report_file=$(find "$base_path" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1) + if [ -n "$report_file" ] && [ -f "$report_file" ]; then + echo "[INFO] Found report file by name: $report_file" >&2 + cat "$report_file" + return 0 fi + # Last resort: search for any e2e_report file matching storage type + echo "[DEBUG] Last resort: searching for any e2e_report_*.json containing ${storage_type}" >&2 + report_file=$(find "$base_path" -type f -name "*e2e_report*${storage_type}*.json" 2>/dev/null | head -1) if [ -n "$report_file" ] && [ -f "$report_file" ]; then - echo "[INFO] Found report file: $report_file" + echo "[INFO] Found report file (last resort): $report_file" >&2 cat "$report_file" return 0 - else - echo "[WARN] Report file not found for storage type: $storage_type" - echo "[DEBUG] Final search path: $base_path" - return 1 fi + + echo "[WARN] Could not load report artifact for $storage_type" >&2 + echo "[DEBUG] Final search path: $base_path" >&2 + return 1 } # Function to create failure summary JSON (fallback) @@ -313,7 +336,22 @@ jobs: echo "[INFO] Processing $storage" # Try to load report from artifact - structured_report=$(load_report_from_artifact "$storage" 2>/dev/null || true) + # Debug messages go to stderr (visible in logs), JSON content goes to stdout + echo "[INFO] Attempting to load report for $storage" + structured_report=$(load_report_from_artifact "$storage" || true) + + if [ -n "$structured_report" ]; then + # Check if it's valid JSON + if echo "$structured_report" | jq empty 2>/dev/null; then + echo "[INFO] Report is valid JSON for $storage" + else + echo "[WARN] Report is not valid JSON for $storage" + echo "[DEBUG] Raw report content (first 200 chars):" + echo "$structured_report" | head -c 200 + echo "" + structured_report="" + fi + fi if [ -n "$structured_report" ] && echo "$structured_report" | jq empty 2>/dev/null; then # Extract report data from structured file From 51fe3d1da2eac59d95f45380fc640be8775420b8 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Thu, 25 Dec 2025 21:02:08 +0300 Subject: [PATCH 61/71] add timeout for checking virt-handler Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 4b270b08ae..72d6bf37ae 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -974,8 +974,8 @@ jobs: echo "[INFO] Checking Virtualization module deployments" kubectl -n d8-virtualization wait --for=condition=Available deploy --all --timeout 300s - echo "[INFO] Checking virt-handler pods" - kubectl -n d8-virtualization wait --for=condition=Ready pods -l kubevirt.internal.virtualization.deckhouse.io=virt-handler + echo "[INFO] Checking virt-handler pods (timeout 300s)" + kubectl -n d8-virtualization wait --for=condition=Ready pods -l kubevirt.internal.virtualization.deckhouse.io=virt-handler --timeout 300s e2e-test: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 From 25b1d28a379f6c7962ff2437b80607450eb8f36f 
Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 26 Dec 2025 15:21:15 +0300 Subject: [PATCH 62/71] fix branch name in summary Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 72d6bf37ae..62dc5ca778 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1077,7 +1077,7 @@ jobs: --arg csi "$CSI" \ --arg date "$DATE" \ --arg startTime "$START_TIME" \ - --arg branch "$GITHUB_REF_NAME" \ + --arg branch "${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" \ --arg status "$RESULT_STATUS" \ --argjson passed "$PASSED" \ --argjson failed "$FAILED" \ From bded5c14052926e60832979535e914d1d2d8ad7b Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 26 Dec 2025 15:23:57 +0300 Subject: [PATCH 63/71] fix deploy template custom-certificate Signed-off-by: Nikita Korolev --- templates/custom-certificate.yaml | 37 ------------------------------- 1 file changed, 37 deletions(-) diff --git a/templates/custom-certificate.yaml b/templates/custom-certificate.yaml index 96586e3b38..a2fded6222 100644 --- a/templates/custom-certificate.yaml +++ b/templates/custom-certificate.yaml @@ -1,38 +1 @@ -{{- /* Copy from helm-lib to work with custom certificate retrieved by the common hook in module-sdk. }} */ -}} -{{- /* Values contain non-encoded certificates, we need to base64 them for the Secret data. }} */ -}} - -{{- /* Usage: {{ include "helm_lib_module_https_copy_custom_certificate" (list . "namespace" "secret_name_prefix") }} */ -}} -{{- /* Renders secret with [custom certificate](https://deckhouse.io/products/kubernetes-platform/documentation/v1/deckhouse-configure-global.html#parameters-modules-https-customcertificate) */ -}} -{{- /* in passed namespace with passed prefix */ -}} -{{/* -{{- define "override_until_fixed::helm_lib_module_https_copy_custom_certificate" -}} - {{- $context := index . 0 -}} {{- / Template context with .Values, .Chart, etc / -}} - {{- $namespace := index . 1 -}} {{- / Namespace / -}} - {{- $secret_name_prefix := index . 2 -}} {{- / Secret name prefix / -}} - {{- $mode := include "helm_lib_module_https_mode" $context -}} - {{- if eq $mode "CustomCertificate" -}} - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} - {{- $secret_name := include "helm_lib_module_https_secret_name" (list $context $secret_name_prefix) -}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ $secret_name }} - namespace: {{ $namespace }} - {{- include "helm_lib_module_labels" (list $context) | nindent 2 }} -type: kubernetes.io/tls -data: -{{- if (hasKey $module_values.internal.customCertificateData "ca.crt") }} - ca.crt: {{ index $module_values.internal.customCertificateData "ca.crt" | b64enc }} -{{- end }} - tls.crt: {{ index $module_values.internal.customCertificateData "tls.crt" | b64enc }} - tls.key: {{ index $module_values.internal.customCertificateData "tls.key" | b64enc }} - {{- end -}} -{{- end -}} - - - -{{- include "override_until_fixed::helm_lib_module_https_copy_custom_certificate" (list . "d8-virtualization" "ingress-tls") -}} -*/}} - {{- include "helm_lib_module_https_copy_custom_certificate" (list . 
"d8-virtualization" "ingress-tls") -}} From 4f39ec5dcef18dd631629acbae2e6cf243ecc0d2 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 26 Dec 2025 16:04:35 +0300 Subject: [PATCH 64/71] refactor determine-stage and 'Send results to channel' Signed-off-by: Nikita Korolev --- .github/workflows/e2e-matrix.yml | 81 ++---------------- .github/workflows/e2e-reusable-pipeline.yml | 94 ++++++++++----------- 2 files changed, 51 insertions(+), 124 deletions(-) diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 1620b3de9c..dcf4cf7b8b 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -88,37 +88,6 @@ jobs: path: downloaded-artifacts/ merge-multiple: false - - name: Download E2E report artifacts (by name - ceph) - uses: actions/download-artifact@v5 - continue-on-error: true - id: download-artifacts-ceph - with: - name: ${{ needs.e2e-ceph.outputs.artifact-name }} - path: downloaded-artifacts/ceph/ - - - name: Download E2E report artifacts (by name - replicated) - uses: actions/download-artifact@v5 - continue-on-error: true - id: download-artifacts-replicated - with: - name: ${{ needs.e2e-replicated.outputs.artifact-name }} - path: downloaded-artifacts/replicated/ - - - name: Verify artifact downloads - run: | - echo "[INFO] Verifying artifact downloads..." - echo "[INFO] Artifact name from e2e-ceph: ${{ needs.e2e-ceph.outputs.artifact-name }}" - echo "[INFO] Artifact name from e2e-replicated: ${{ needs.e2e-replicated.outputs.artifact-name }}" - echo "[INFO] Pattern download result: ${{ steps.download-artifacts-pattern.outcome }}" - echo "[INFO] Ceph download result: ${{ steps.download-artifacts-ceph.outcome }}" - echo "[INFO] Replicated download result: ${{ steps.download-artifacts-replicated.outcome }}" - if [ -d "downloaded-artifacts" ]; then - echo "[INFO] Downloaded artifacts structure:" - find downloaded-artifacts -type f -o -type d | head -20 - else - echo "[WARN] downloaded-artifacts directory does not exist" - fi - - name: Send results to channel run: | # Map storage types to CSI names @@ -139,42 +108,25 @@ jobs: # Function to load and parse report from artifact # Outputs: file content to stdout, debug messages to stderr + # Works with pattern-based artifact download (e2e-report-*) + # Artifacts are organized as: downloaded-artifacts/e2e-report--/e2e_report_.json load_report_from_artifact() { local storage_type=$1 local base_path="downloaded-artifacts/" - local specific_path="${base_path}${storage_type}/" echo "[INFO] Searching for report for storage type: $storage_type" >&2 echo "[DEBUG] Base path: $base_path" >&2 - echo "[DEBUG] Specific path: $specific_path" >&2 - # List all downloaded artifacts for debugging - if [ -d "$base_path" ]; then - echo "[DEBUG] Contents of downloaded-artifacts:" >&2 - find "$base_path" -type f -name "*.json" 2>/dev/null | head -20 >&2 || echo "No JSON files found" >&2 - echo "[DEBUG] Directories in downloaded-artifacts:" >&2 - find "$base_path" -type d -maxdepth 3 2>/dev/null >&2 || echo "No directories found" >&2 - else + if [ ! 
-d "$base_path" ]; then echo "[WARN] Base path does not exist: $base_path" >&2 return 1 fi local report_file="" - # First, try to find in specific storage type directory (when downloaded by name) - if [ -d "$specific_path" ]; then - echo "[DEBUG] Searching in specific path: $specific_path" >&2 - report_file=$(find "$specific_path" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) - if [ -n "$report_file" ] && [ -f "$report_file" ]; then - echo "[INFO] Found report file in specific path: $report_file" >&2 - cat "$report_file" - return 0 - fi - fi - - # Second, search in artifact directories (when downloaded by pattern) + # First, search in artifact directories matching pattern: e2e-report--* # Pattern downloads create subdirectories named after the artifact - # e.g., downloaded-artifacts/e2e-report-replicated-/e2e_report_replicated.json + # e.g., downloaded-artifacts/e2e-report-ceph-/e2e_report_ceph.json echo "[DEBUG] Searching in artifact directories matching pattern: e2e-report-${storage_type}-*" >&2 local artifact_dir=$(find "$base_path" -type d -name "e2e-report-${storage_type}-*" 2>/dev/null | head -1) if [ -n "$artifact_dir" ]; then @@ -187,7 +139,7 @@ jobs: fi fi - # Third, search for file by name pattern anywhere in base_path + # Fallback: search for file by name pattern anywhere in base_path echo "[DEBUG] Searching for file: e2e_report_${storage_type}.json" >&2 report_file=$(find "$base_path" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1) if [ -n "$report_file" ] && [ -f "$report_file" ]; then @@ -196,17 +148,7 @@ jobs: return 0 fi - # Last resort: search for any e2e_report file matching storage type - echo "[DEBUG] Last resort: searching for any e2e_report_*.json containing ${storage_type}" >&2 - report_file=$(find "$base_path" -type f -name "*e2e_report*${storage_type}*.json" 2>/dev/null | head -1) - if [ -n "$report_file" ] && [ -f "$report_file" ]; then - echo "[INFO] Found report file (last resort): $report_file" >&2 - cat "$report_file" - return 0 - fi - echo "[WARN] Could not load report artifact for $storage_type" >&2 - echo "[DEBUG] Final search path: $base_path" >&2 return 1 } @@ -294,16 +236,6 @@ jobs: [ -z "$skipped" ] && skipped=0 [ -z "$status" ] && status=":question: UNKNOWN" - # Validate date - if [ -n "$date" ] && [ "$date" != "" ]; then - current_date=$(date +"%Y-%m-%d") - if date -d "$current_date" +%s >/dev/null 2>&1 && date -d "$date" +%s >/dev/null 2>&1; then - if [ "$(date -d "$current_date" +%s)" -gt "$(date -d "$date" +%s)" ]; then - status=":x: WRONG REPORT DATE!" 
- fi - fi - fi - # Format link - use CSI name as fallback if link is empty if [ -z "$link" ] || [ "$link" == "" ]; then link_text="$csi" @@ -408,4 +340,3 @@ jobs: fi env: LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }} - diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 62dc5ca778..0d341b4ea2 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -102,7 +102,6 @@ jobs: kubeconfig-content: ${{ steps.generate-kubeconfig.outputs.config }} storage-type: ${{ steps.vars.outputs.storage_type }} nested-storageclass-name: ${{ steps.vars.outputs.nested_storageclass_name }} - bootstrap-failed: ${{ steps.dhctl-bootstrap.outcome == 'failure' }} steps: - uses: actions/checkout@v4 # with: @@ -374,7 +373,6 @@ jobs: - name: Upload generated files uses: actions/upload-artifact@v4 - id: artifact-upload if: always() with: name: generated-files-${{ inputs.storage_type }} @@ -387,7 +385,6 @@ jobs: - name: Upload ssh config uses: actions/upload-artifact@v4 - id: artifact-upload-ssh if: always() with: name: generated-files-ssh-${{ inputs.storage_type }} @@ -398,7 +395,6 @@ jobs: - name: Upload kubeconfig config uses: actions/upload-artifact@v4 - id: artifact-upload-kubeconfig with: name: generated-files-kubeconfig-${{ inputs.storage_type }} path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/kube-config @@ -410,9 +406,6 @@ jobs: name: Configure storage (${{ inputs.storage_type }}) runs-on: ubuntu-latest needs: bootstrap - outputs: - storage-replicated-setup: ${{ steps.storage-replicated-setup.outcome == 'failure' }} - storage-ceph-setup: ${{ steps.storage-ceph-setup.outcome == 'failure' }} steps: - uses: actions/checkout@v4 @@ -456,7 +449,7 @@ jobs: fi done - if $i -eq 3; then + if [ $i -eq 3 ]; then echo "[ERROR] Failed to retrieve nodes." exit 1 fi @@ -1047,7 +1040,7 @@ jobs: if [[ "${{ inputs.storage_type }}" == "replicated" ]]; then export SKIP_IMMEDIATE_SC_CHECK="yes" fi - + cp -a legacy/testdata /tmp/testdata set +e @@ -1100,7 +1093,6 @@ jobs: echo "$SUMMARY" echo "summary=$(echo "$SUMMARY" | jq -c .)" >> $GITHUB_OUTPUT - echo "report_file_name=${summary_file_name_json}" >> $GITHUB_OUTPUT echo $SUMMARY > "${summary_file_name_json}" echo "[INFO] Exit code: $GINKGO_EXIT_CODE" @@ -1148,7 +1140,7 @@ jobs: if [ -z "$BRANCH_NAME" ] || [ "$BRANCH_NAME" == "refs/heads/" ]; then BRANCH_NAME="${{ github.ref_name }}" fi - + # Function to create failure summary JSON with proper job URL create_failure_summary() { local stage=$1 @@ -1183,64 +1175,68 @@ jobs: Link: $link }' } - + # Try to find and load E2E test report E2E_REPORT_FILE="" REPORT_JSON="" - + # Search for report file in test/e2e directory E2E_REPORT_FILE=$(find test/e2e -name "e2e_summary_${{ inputs.storage_type }}_*.json" -type f 2>/dev/null | head -1) - + if [ -n "$E2E_REPORT_FILE" ] && [ -f "$E2E_REPORT_FILE" ]; then echo "[INFO] Found E2E report file: $E2E_REPORT_FILE" REPORT_JSON=$(cat "$E2E_REPORT_FILE" | jq -c .) echo "[INFO] Loaded report from file" echo "$REPORT_JSON" | jq . 
fi - + + # Function to process a stage + process_stage() { + local result_value="$1" + local stage_name="$2" + local status_msg="$3" + local job_name="$4" + local is_e2e_test="${5:-false}" + + if [ "$result_value" != "success" ]; then + FAILED_STAGE="$stage_name" + FAILED_JOB_NAME="$job_name (${{ inputs.storage_type }})" + + if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then + REPORT_JSON=$(create_failure_summary "$stage_name" "$status_msg" "$FAILED_JOB_NAME") + elif [ "$is_e2e_test" == "true" ]; then + # Special handling for e2e-test: update status if needed + CURRENT_STATUS=$(echo "$REPORT_JSON" | jq -r '.Status // ""') + if [[ "$CURRENT_STATUS" != *"FAIL"* ]] && [[ "$CURRENT_STATUS" != *"SUCCESS"* ]]; then + REPORT_JSON=$(echo "$REPORT_JSON" | jq -c '.Status = ":x: E2E TEST FAILED"') + fi + fi + return 0 # Stage failed + fi + return 1 # Stage succeeded + } + # Determine which stage failed and prepare report FAILED_STAGE="" FAILED_JOB_NAME="" - - if [ "${{ needs.bootstrap.result }}" != "success" ]; then - FAILED_STAGE="bootstrap" - FAILED_JOB_NAME="Bootstrap cluster (${{ inputs.storage_type }})" - if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then - REPORT_JSON=$(create_failure_summary "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED" "$FAILED_JOB_NAME") - fi - elif [ "${{ needs.configure-storage.result }}" != "success" ]; then - FAILED_STAGE="storage-setup" - FAILED_JOB_NAME="Configure storage (${{ inputs.storage_type }})" - if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then - REPORT_JSON=$(create_failure_summary "storage-setup" ":x: STORAGE SETUP FAILED" "$FAILED_JOB_NAME") - fi - elif [ "${{ needs.configure-virtualization.result }}" != "success" ]; then - FAILED_STAGE="virtualization-setup" - FAILED_JOB_NAME="Configure Virtualization (${{ inputs.storage_type }})" - if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then - REPORT_JSON=$(create_failure_summary "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED" "$FAILED_JOB_NAME") - fi - elif [ "${{ needs.e2e-test.result }}" != "success" ]; then - FAILED_STAGE="e2e-test" - FAILED_JOB_NAME="E2E test (${{ inputs.storage_type }})" - if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then - REPORT_JSON=$(create_failure_summary "e2e-test" ":x: E2E TEST FAILED" "$FAILED_JOB_NAME") - else - # Report exists, but check if status needs to be updated - CURRENT_STATUS=$(echo "$REPORT_JSON" | jq -r '.Status // ""') - if [[ "$CURRENT_STATUS" != *"FAIL"* ]] && [[ "$CURRENT_STATUS" != *"SUCCESS"* ]]; then - # Update status to failure if not already set - REPORT_JSON=$(echo "$REPORT_JSON" | jq -c '.Status = ":x: E2E TEST FAILED"') - fi - fi + + if process_stage "${{ needs.bootstrap.result }}" "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED" "Bootstrap cluster"; then + : # Stage failed, handled in function + elif process_stage "${{ needs.configure-storage.result }}" "storage-setup" ":x: STORAGE SETUP FAILED" "Configure storage"; then + : # Stage failed, handled in function + elif process_stage "${{ needs.configure-virtualization.result }}" "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED" "Configure Virtualization"; then + : # Stage failed, handled in function + elif process_stage "${{ needs.e2e-test.result }}" "e2e-test" ":x: E2E TEST FAILED" "E2E test" "true"; then + : # Stage failed, handled in function else + # All stages succeeded FAILED_STAGE="success" FAILED_JOB_NAME="E2E test (${{ inputs.storage_type }})" if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then REPORT_JSON=$(create_failure_summary "success" 
":white_check_mark: SUCCESS!" "$FAILED_JOB_NAME") fi fi - + # Create structured report file with metadata REPORT_FILE="e2e_report_${{ inputs.storage_type }}.json" # Parse REPORT_JSON to ensure it's valid JSON before using it @@ -1260,7 +1256,7 @@ jobs: workflow_run_url: $workflow_run_url, report: $report }' > "$REPORT_FILE" - + echo "report_file=$REPORT_FILE" >> $GITHUB_OUTPUT echo "[INFO] Created report file: $REPORT_FILE" echo "[INFO] Failed stage: $FAILED_STAGE" From 48beb5c5385e77bf6ae2de9f89390121d4b34e09 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 26 Dec 2025 17:18:09 +0300 Subject: [PATCH 65/71] update wait virtualization Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 115 +++++++++++++------- 1 file changed, 73 insertions(+), 42 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 0d341b4ea2..4724f2a1f8 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -859,6 +859,49 @@ jobs: d8 s queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" } + debug_output() { + echo "[ERROR] Virtualization module deploy failed" + echo "[DEBUG] Show describe virtualization module" + echo "::group::📦 describe virtualization module" + kubectl describe modules virtualization || true + echo "::endgroup::" + echo "[DEBUG] Show namespace d8-virtualization" + kubectl get ns d8-virtualization || true + echo "[DEBUG] Show pods in namespace d8-virtualization" + kubectl -n d8-virtualization get pods || true + echo "[DEBUG] Show dvcr info" + echo "::group::📦 dvcr pod describe" + kubectl -n d8-virtualization describe pod -l app=dvcr || true + echo "::endgroup::" + echo " " + echo "::group::📦 dvcr pod yaml" + kubectl -n d8-virtualization get pods -l app=dvcr -o yaml || true + echo "::endgroup::" + echo " " + echo "::group::📦 dvcr deployment yaml" + kubectl -n d8-virtualization get deployment -l app=dvcr -o yaml || true + echo "::endgroup::" + echo " " + echo "::group::📦 dvcr deployment describe" + kubectl -n d8-virtualization describe deployment -l app=dvcr || true + echo "::endgroup::" + echo " " + echo "::group::📦 dvcr service yaml" + kubectl -n d8-virtualization get service -l app=dvcr -o yaml || true + echo "::endgroup::" + echo " " + echo "[DEBUG] Show pvc in namespace d8-virtualization" + kubectl get pvc -n d8-virtualization || true + echo "[DEBUG] Show storageclasses" + kubectl get storageclasses || true + echo "[DEBUG] Show queue (first 25 lines)" + d8 s queue list | head -n 25 || echo "[WARNING] Failed to retrieve list queue" + echo "[DEBUG] Show deckhouse logs" + echo "::group::📝 deckhouse logs" + d8 s logs | tail -n 100 + echo "::endgroup::" + } + d8_queue() { local count=90 local queue_count @@ -916,46 +959,33 @@ jobs: sleep 10 done - echo "[ERROR] Virtualization module deploy failed" - echo "[DEBUG] Show describe virtualization module" - echo "::group::📦 describe virtualization module" - kubectl describe modules virtualization || true - echo "::endgroup::" - echo "[DEBUG] Show namespace d8-virtualization" - kubectl get ns d8-virtualization || true - echo "[DEBUG] Show pods in namespace d8-virtualization" - kubectl -n d8-virtualization get pods || true - echo "[DEBUG] Show dvcr info" - echo "::group::📦 dvcr pod describe" - kubectl -n d8-virtualization describe pod -l app=dvcr || true - echo "::endgroup::" - echo " " - echo "::group::📦 dvcr pod yaml" - kubectl -n d8-virtualization get pods -l app=dvcr -o 
yaml || true - echo "::endgroup::" - echo " " - echo "::group::📦 dvcr deployment yaml" - kubectl -n d8-virtualization get deployment -l app=dvcr -o yaml || true - echo "::endgroup::" - echo " " - echo "::group::📦 dvcr deployment describe" - kubectl -n d8-virtualization describe deployment -l app=dvcr || true - echo "::endgroup::" - echo " " - echo "::group::📦 dvcr service yaml" - kubectl -n d8-virtualization get service -l app=dvcr -o yaml || true - echo "::endgroup::" - echo " " - echo "[DEBUG] Show pvc in namespace d8-virtualization" - kubectl get pvc -n d8-virtualization || true - echo "[DEBUG] Show storageclasses" - kubectl get storageclasses || true - echo "[DEBUG] Show queue (first 25 lines)" - d8 s queue list | head -n 25 || echo "[WARNING] Failed to retrieve list queue" - echo "[DEBUG] Show deckhouse logs" - echo "::group::📝 deckhouse logs" - d8 s logs | tail -n 100 - echo "::endgroup::" + debug_output + exit 1 + } + + virt_handler_ready() { + local count=60 + local virt_handler_status + local workers=$(kubectl get nodes -o name | grep worker | wc -l || true) + workers=$((workers)) + + for i in $(seq 1 $count); do + virt_handler_status=$(kubectl -n d8-virtualization get pods | grep " virt-handler.*Running" | wc -l || true) + + if [[ $virt_handler_status -ge $workers ]]; then + echo "[SUCCESS] virt-handlers pods are ready" + return 0 + fi + + if (( i % 5 == 0 )); then + echo "[DEBUG] Get pods" + kubectl -n d8-virtualization get pods || echo "No pods virt-handler found" + echo " " + fi + sleep 10 + done + + debug_output exit 1 } @@ -967,8 +997,9 @@ jobs: echo "[INFO] Checking Virtualization module deployments" kubectl -n d8-virtualization wait --for=condition=Available deploy --all --timeout 300s - echo "[INFO] Checking virt-handler pods (timeout 300s)" - kubectl -n d8-virtualization wait --for=condition=Ready pods -l kubevirt.internal.virtualization.deckhouse.io=virt-handler --timeout 300s + echo "[INFO] Checking virt-handler pods " + virt_handler_ready + e2e-test: name: E2E test (${{ inputs.storage_type }}) runs-on: ubuntu-22.04 From 3f1354d85b14bf7c28a564864b7630694eddab1b Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 26 Dec 2025 17:58:03 +0300 Subject: [PATCH 66/71] add condition for FOCUS var for ginkgo test Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 14 ++++++++++---- test/e2e/scripts/task_run_ci.sh | 15 +++++---------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 4724f2a1f8..5114405020 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1076,10 +1076,16 @@ jobs: set +e FOCUS="VirtualMachineConfiguration" \ - go tool ginkgo \ - --focus "VirtualMachineConfiguration" \ - -v --race --timeout=$TIMEOUT \ - --junit-report=$summary_file_name_junit | tee $GINKGO_RESULT + if [ -n $FOCUS ]; then + go tool ginkgo \ + --focus="$FOCUS" \ + -v --race --timeout=$TIMEOUT \ + --junit-report=$summary_file_name_junit | tee $GINKGO_RESULT + else + go tool ginkgo \ + -v --race --timeout=$TIMEOUT \ + --junit-report=$summary_file_name_junit | tee $GINKGO_RESULT + fi GINKGO_EXIT_CODE=$? 
set -e diff --git a/test/e2e/scripts/task_run_ci.sh b/test/e2e/scripts/task_run_ci.sh index 373e83c923..9a3e7a8457 100755 --- a/test/e2e/scripts/task_run_ci.sh +++ b/test/e2e/scripts/task_run_ci.sh @@ -21,21 +21,16 @@ echo "DATE=$DATE" >> $GITHUB_ENV START_TIME=$(date +"%H:%M:%S") echo "START_TIME=$START_TIME" >> $GITHUB_ENV -if [[ -n $FOCUS ]];then - go tool ginkgo --focus "$FOCUS" -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT -else - go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT -fi - -# EXIT_CODE="${PIPESTATUS[0]}" +go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT +EXIT_CODE="${PIPESTATUS[0]}" RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!") -if [[ $RESULT == FAIL!* ]]; then +if [[ $RESULT == FAIL!* || $EXIT_CODE -ne "0" ]]; then RESULT_STATUS=":x: FAIL!" elif [[ $RESULT == SUCCESS!* ]]; then RESULT_STATUS=":white_check_mark: SUCCESS!" else RESULT_STATUS=":question: UNKNOWN" - # EXIT_CODE=1 + EXIT_CODE=1 fi PASSED=$(echo "$RESULT" | grep -oP "\d+(?= Passed)") @@ -70,4 +65,4 @@ SUMMARY=$(jq -n \ echo "$SUMMARY" echo "SUMMARY=$(echo "$SUMMARY" | jq -c .)" >> $GITHUB_ENV -# exit $EXIT_CODE +exit $EXIT_CODE From 558567844ee3ae278ae7772bdbbf4e8247db968d Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 26 Dec 2025 20:06:41 +0300 Subject: [PATCH 67/71] fix virt-handler wait Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 5114405020..38df089245 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -965,22 +965,25 @@ jobs: virt_handler_ready() { local count=60 - local virt_handler_status + local virt_handler_ready local workers=$(kubectl get nodes -o name | grep worker | wc -l || true) workers=$((workers)) for i in $(seq 1 $count); do - virt_handler_status=$(kubectl -n d8-virtualization get pods | grep " virt-handler.*Running" | wc -l || true) + virt_handler_ready=$(kubectl -n d8-virtualization get pods | grep "virt-handler.*Running" | wc -l || true) - if [[ $virt_handler_status -ge $workers ]]; then + if [[ $virt_handler_ready -ge $workers ]]; then echo "[SUCCESS] virt-handlers pods are ready" return 0 fi + echo "[INFO] virt-handler pods $virt_handler_ready/$workers " + echo "[INFO] Wait virt-handler pods are ready (attempt $i/$count)" if (( i % 5 == 0 )); then - echo "[DEBUG] Get pods" + echo "[DEBUG] Get pods in namespace d8-virtualization" + echo "::group::📦 virt-handler pods" kubectl -n d8-virtualization get pods || echo "No pods virt-handler found" - echo " " + echo "::endgroup::" fi sleep 10 done From 46e5e2813354ce2cbe4c6ddbac9768d7f085a17d Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Fri, 26 Dec 2025 20:08:56 +0300 Subject: [PATCH 68/71] rm commenter always from undeploy-cluster Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 38df089245..d96c87328d 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1326,7 +1326,6 @@ jobs: - configure-storage - configure-virtualization - e2e-test - # if: always() if: cancelled() || success() steps: - uses: actions/checkout@v4 From 
b846e64a0a3e88557da7d9c0dca00188a8d42c3e Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Mon, 29 Dec 2025 11:25:21 +0300 Subject: [PATCH 69/71] fix FOCUS nexline Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index d96c87328d..73dd8fad73 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -1078,7 +1078,7 @@ jobs: cp -a legacy/testdata /tmp/testdata set +e - FOCUS="VirtualMachineConfiguration" \ + FOCUS="VirtualMachineConfiguration" if [ -n $FOCUS ]; then go tool ginkgo \ --focus="$FOCUS" \ From 7d4f48cb77f45933d818129ef1516a325b586a41 Mon Sep 17 00:00:00 2001 From: Nikita Korolev Date: Tue, 30 Dec 2025 15:42:56 +0300 Subject: [PATCH 70/71] fix formatting Signed-off-by: Nikita Korolev --- .../scripts/gen-kubeconfig.sh | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/dvp-static-cluster/scripts/gen-kubeconfig.sh b/test/dvp-static-cluster/scripts/gen-kubeconfig.sh index efab6c25ad..50ed5d5c35 100755 --- a/test/dvp-static-cluster/scripts/gen-kubeconfig.sh +++ b/test/dvp-static-cluster/scripts/gen-kubeconfig.sh @@ -111,7 +111,7 @@ kubectl apply -f -< Date: Wed, 14 Jan 2026 17:15:05 +0300 Subject: [PATCH 71/71] set current kube context in kubeconfig Signed-off-by: Nikita Korolev --- .github/workflows/e2e-reusable-pipeline.yml | 12 ++++++------ test/dvp-static-cluster/scripts/gen-kubeconfig.sh | 7 +++++++ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index 73dd8fad73..f15d902258 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -435,21 +435,21 @@ jobs: chmod 600 ~/.kube/config echo "[INFO] Show nodes in cluster" - kubectl config use-context nested-e2e-nested-sa + kubectl config get-contexts # some times kubectl get nodes returns error, so we need to retry - for i in {1..3}; do + for i in {1..5}; do echo "Attempt $i/3..." if (kubectl get nodes); then echo "[SUCCESS] Successfully retrieved nodes." break - else - echo "[INFO] Retrying in 5 seconds..." - sleep 5 fi + + echo "[INFO] Retrying in 10 seconds..." + sleep 10 done - if [ $i -eq 3 ]; then + if [ $i -eq 5 ]; then echo "[ERROR] Failed to retrieve nodes." exit 1 fi diff --git a/test/dvp-static-cluster/scripts/gen-kubeconfig.sh b/test/dvp-static-cluster/scripts/gen-kubeconfig.sh index 50ed5d5c35..21395bb0eb 100755 --- a/test/dvp-static-cluster/scripts/gen-kubeconfig.sh +++ b/test/dvp-static-cluster/scripts/gen-kubeconfig.sh @@ -160,11 +160,18 @@ kubeconfig_set_context() { --kubeconfig=${FILE_NAME} } +kubeconfig_set_current_context() { + log_info "Set current context" + kubectl config set current-context ${CONTEXT_NAME} \ + --kubeconfig=${FILE_NAME} +} + log_info "Create kubeconfig" kubeconfig_cert_cluster_section kubeconfig_set_credentials kubeconfig_set_context +kubeconfig_set_current_context log_success "kubeconfig created and stored in ${FILE_NAME}"
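
For local verification of the kubeconfig produced by gen-kubeconfig.sh, the following is a minimal smoke-test sketch in the spirit of the retry loop used in the pipeline above. It is not part of the patches: the output path ./kube-config, the attempt count, and the messages are assumptions for illustration only.

#!/usr/bin/env bash
set -euo pipefail

# Assumed output path of gen-kubeconfig.sh; adjust to the FILE_NAME the script actually uses.
export KUBECONFIG="${KUBECONFIG:-./kube-config}"

# The current context is written into the file by kubeconfig_set_current_context,
# so no explicit `kubectl config use-context` should be needed here.
kubectl config current-context

# Retry `kubectl get nodes`, succeeding as soon as one attempt works and
# failing only after all attempts are exhausted, so the loop counter never
# has to be re-checked after a successful break.
attempts=5
for ((i = 1; i <= attempts; i++)); do
  if kubectl get nodes; then
    echo "[SUCCESS] Cluster is reachable (attempt $i/$attempts)"
    exit 0
  fi
  echo "[INFO] Attempt $i/$attempts failed, retrying in 10 seconds..."
  sleep 10
done

echo "[ERROR] Failed to retrieve nodes after $attempts attempts."
exit 1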