diff --git a/.builder-image-version.txt b/.builder-image-version.txt
index 154b9fce5..4a4127c37 100644
--- a/.builder-image-version.txt
+++ b/.builder-image-version.txt
@@ -1 +1 @@
-1.0.23
+1.0.25
diff --git a/.github/actions/e2e/action.yaml b/.github/actions/e2e/action.yaml
index 72d763a68..915171e61 100644
--- a/.github/actions/e2e/action.yaml
+++ b/.github/actions/e2e/action.yaml
@@ -23,6 +23,8 @@ runs:
using: "composite"
steps:
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+ with:
+ go-version-file: "go.mod"
- uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4
with:
@@ -41,8 +43,11 @@ runs:
- name: Install hcloud
shell: bash
env:
- hcloudctl_version: 1.40.0
- run: curl -fsSL https://github.com/hetznercloud/cli/releases/download/v${{ env.hcloudctl_version }}/hcloud-linux-amd64.tar.gz | tar -xzv hcloud ; mv hcloud hack/tools/bin/hcloud
+ hcloudctl_version: v1.52.0
+ run: |
+ curl -fsSL https://github.com/hetznercloud/cli/releases/download/${{ env.hcloudctl_version }}/hcloud-linux-amd64.tar.gz | tar -xzv hcloud
+ mkdir -p hack/tools/bin/
+ mv hcloud hack/tools/bin/hcloud
- name: Generate metadata
id: meta
diff --git a/.github/actions/fixes/action.yaml b/.github/actions/fixes/action.yaml
new file mode 100644
index 000000000..d1514d6e8
--- /dev/null
+++ b/.github/actions/fixes/action.yaml
@@ -0,0 +1,25 @@
+name: Fix env var HOME and git permissions
+description: Fix env var HOME and git permissions
+runs:
+ using: "composite"
+ steps:
+ - name: Fix HOME Directory
+ shell: bash
+ run: |
+ # Issue [HOME is overridden for containers](https://github.com/actions/runner/issues/863)
+ h=$(getent passwd $(id -un) | cut -d: -f6)
+ if [ "$h" = "$HOME" ]; then
+ echo "HOME fine: $HOME"
+ exit 0
+ fi
+ echo "HOME=$HOME was broken. Setting it to $h"
+ ls -ld $HOME
+ ls -ld $h
+ echo "USER: $USER"
+ echo "id: $(id)"
+ echo "HOME=$h" >> $GITHUB_ENV
+
+ - name: Fixup git permissions
+ # https://github.com/actions/checkout/issues/766
+ shell: bash
+ run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
diff --git a/.github/actions/test-release/action.yaml b/.github/actions/test-release/action.yaml
index 8a55db32f..c7a340671 100644
--- a/.github/actions/test-release/action.yaml
+++ b/.github/actions/test-release/action.yaml
@@ -4,6 +4,8 @@ runs:
using: "composite"
steps:
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+ with:
+ go-version-file: "go.mod"
- uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4
with:
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index bcde4ecd6..082eb4f91 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -33,6 +33,8 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+ with:
+ go-version-file: "go.mod"
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3
@@ -55,7 +57,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install Cosign
- uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
+ uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Install Bom
shell: bash
diff --git a/.github/workflows/main-promote-builder-image.yml b/.github/workflows/main-promote-builder-image.yml
index b13160ff7..c707a893e 100644
--- a/.github/workflows/main-promote-builder-image.yml
+++ b/.github/workflows/main-promote-builder-image.yml
@@ -10,7 +10,7 @@ jobs:
name: Promote Latest tag to Caph Builder Image
runs-on: ubuntu-latest
container:
- image: ghcr.io/syself/caph-builder:1.0.23
+ image: ghcr.io/syself/caph-builder:1.0.25
credentials:
username: ${{ github.actor }}
password: ${{ secrets.github_token }}
diff --git a/.github/workflows/pr-e2e.yaml b/.github/workflows/pr-e2e.yaml
index 4bd80c68d..b3a1c358e 100644
--- a/.github/workflows/pr-e2e.yaml
+++ b/.github/workflows/pr-e2e.yaml
@@ -38,9 +38,20 @@ jobs:
name: Test Release
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/syself/caph-builder:1.0.25
+ credentials:
+ username: ${{ github.actor }}
+ password: ${{ secrets.github_token }}
+ env:
+ BUILD_IN_CONTAINER: "false"
steps:
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+
+ - name: Fix broken env var HOME
+ uses: ./.github/actions/fixes
+
- name: Test Release
id: manager-image
uses: ./.github/actions/test-release
diff --git a/.github/workflows/pr-lint.yml b/.github/workflows/pr-lint.yml
index ee07d8cc8..08b7d333b 100644
--- a/.github/workflows/pr-lint.yml
+++ b/.github/workflows/pr-lint.yml
@@ -22,7 +22,7 @@ jobs:
name: "Lint Pull Request"
runs-on: ubuntu-latest
container:
- image: ghcr.io/syself/caph-builder:1.0.23
+ image: ghcr.io/syself/caph-builder:1.0.25
credentials:
username: ${{ github.actor }}
password: ${{ secrets.github_token }}
diff --git a/.github/workflows/pr-verify.yml b/.github/workflows/pr-verify.yml
index 37c1d7f39..ca97cc353 100644
--- a/.github/workflows/pr-verify.yml
+++ b/.github/workflows/pr-verify.yml
@@ -10,13 +10,26 @@ jobs:
pull-requests: write
issues: write
runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/syself/caph-builder:1.0.25
+ credentials:
+ username: ${{ github.actor }}
+ password: ${{ secrets.github_token }}
name: Verify Pull Request
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - name: Fix Env Var HOME
+ uses: ./.github/actions/fixes
+ # Fixes:
+ # fatal: detected dubious ownership in repository at '/__w/clu...
+ # To add an exception for this directory, call:
+ # git config --global --add safe.directory /__w...
+
# Take from https://github.com/kubernetes-sigs/kubebuilder/blob/master/.github/workflows/verify.yml
- name: Validate PR Title Format
+ shell: bash
env:
TITLE: ${{ github.event.pull_request.title }}
run: |
@@ -48,6 +61,9 @@ jobs:
- name: Verify Starlark
run: make verify-starlark
+ - name: Verify Generated Files of Git Repo
+ run: make BUILD_IN_CONTAINER=false verify-generated-files
+
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version: "22"
@@ -55,6 +71,7 @@ jobs:
run: npm i -g renovate@35.54.0 # TODO update this via renovatebot
- name: Validate config
+ shell: bash
run: |
for file in $(find . -name "*.json5"); do
renovate-config-validator ${file}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 3a04457b9..a65f71ca0 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -24,6 +24,8 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+ with:
+ go-version-file: "go.mod"
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3
@@ -46,7 +48,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install Cosign
- uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
+ uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Install Bom
shell: bash
@@ -138,6 +140,8 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+ with:
+ go-version-file: "go.mod"
- name: install kustomize
run: |
diff --git a/.github/workflows/report-bin-size.yml b/.github/workflows/report-bin-size.yml
index 5290ce066..a2a402b05 100644
--- a/.github/workflows/report-bin-size.yml
+++ b/.github/workflows/report-bin-size.yml
@@ -17,6 +17,8 @@ jobs:
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+ with:
+ go-version-file: "go.mod"
- name: Create Binsize Report
run: make caph report-binsize-treemap-all
diff --git a/.github/workflows/schedule-scan-image.yml b/.github/workflows/schedule-scan-image.yml
index 6473b52df..2194b26b7 100644
--- a/.github/workflows/schedule-scan-image.yml
+++ b/.github/workflows/schedule-scan-image.yml
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'syself/cluster-api-provider-hetzner'
container:
- image: ghcr.io/syself/caph-builder:1.0.23
+ image: ghcr.io/syself/caph-builder:1.0.25
credentials:
username: ${{ github.actor }}
password: ${{ secrets.github_token }}
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 77f3373d9..65ad898ff 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -41,6 +41,8 @@ jobs:
echo name=${NAME} >> $GITHUB_OUTPUT
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+ with:
+ go-version-file: "go.mod"
- name: Install dependencies
run: make gotestsum go-cover-treemap setup-envtest
diff --git a/.gitignore b/.gitignore
index 509094797..fa257cb80 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,7 @@ bin
hack/tools/bin
testbin/*
temp
+/tmp
# Test binary, build with `go test -c`
*.test
.coverage
diff --git a/.golangci.yaml b/.golangci.yaml
index 155998794..3cb401836 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -83,8 +83,6 @@ linters-settings:
alias: bootstrapv1
- pkg: sigs.k8s.io/controller-runtime/pkg/metrics/server
alias: metricsserver
- - pkg: sigs.k8s.io/cluster-api/errors
- alias: capierrors
- pkg: sigs.k8s.io/controller-runtime/pkg/client/fake
alias: fakeclient
nolintlint:
diff --git a/.mockery.yaml b/.mockery.yaml
new file mode 100644
index 000000000..35a022e54
--- /dev/null
+++ b/.mockery.yaml
@@ -0,0 +1,18 @@
+# This config was chosen, so that the output matches to old structure (pre config file .mockery.yaml).
+# If you are here to copy this config to a new project, then it might
+# make sense to choose a structure which needs less config by using
+# the default values of Mockery.
+all: True
+filename: "{{.InterfaceName}}.go"
+mockname: "{{.InterfaceName}}"
+outpkg: mocks
+packages:
+ github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh:
+ config:
+ dir: "{{.InterfaceDir}}/../mocks/ssh"
+ github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/robot:
+ config:
+ dir: "{{.InterfaceDir}}/../mocks/robot"
+ github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client:
+ config:
+ dir: "{{.InterfaceDir}}/mocks"
diff --git a/Makefile b/Makefile
index 7854dbc48..120d1c796 100644
--- a/Makefile
+++ b/Makefile
@@ -88,10 +88,6 @@ export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.31.0
############
# Binaries #
############
-CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/controller-gen)
-controller-gen: $(CONTROLLER_GEN) ## Build a local copy of controller-gen
-$(CONTROLLER_GEN): # Build controller-gen from tools folder.
- go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0
KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/kustomize)
kustomize: $(KUSTOMIZE) ## Build a local copy of kustomize
@@ -124,12 +120,6 @@ clusterctl: $(CLUSTERCTL) ## Build a local copy of clusterctl
$(CLUSTERCTL):
go install sigs.k8s.io/cluster-api/cmd/clusterctl@v1.8.10
-HELM := $(abspath $(TOOLS_BIN_DIR)/helm)
-helm: $(HELM) ## Build a local copy of helm
-$(HELM):
- curl -sSL https://get.helm.sh/helm-v3.13.2-$$(go env GOOS)-$$(go env GOARCH).tar.gz | tar xz -C $(TOOLS_BIN_DIR) --strip-components=1 $$(go env GOOS)-$$(go env GOARCH)/helm
- chmod a+rx $(HELM)
-
HCLOUD := $(abspath $(TOOLS_BIN_DIR)/hcloud)
hcloud: $(HCLOUD) ## Build a local copy of hcloud
$(HCLOUD):
@@ -162,7 +152,7 @@ gotestsum: $(GOTESTSUM) # Build gotestsum from tools folder.
$(GOTESTSUM):
go install gotest.tools/gotestsum@v1.11.0
-all-tools: $(GOTESTSUM) $(go-cover-treemap) $(go-binsize-treemap) $(KIND) $(KUBECTL) $(CLUSTERCTL) $(CTLPTL) $(SETUP_ENVTEST) $(ENVSUBST) $(KUSTOMIZE) $(CONTROLLER_GEN) $(HELM) ## Install all tools required for development
+all-tools: $(GOTESTSUM) $(go-cover-treemap) $(go-binsize-treemap) $(KIND) $(KUBECTL) $(CLUSTERCTL) $(CTLPTL) $(SETUP_ENVTEST) $(ENVSUBST) $(KUSTOMIZE) ## Install all tools required for development
echo 'done'
##@ Development
@@ -184,7 +174,7 @@ install-essentials: ## This gets the secret and installs a CNI and the CCM. Usag
$(MAKE) install-cilium-in-wl-cluster
$(MAKE) install-ccm-in-wl-cluster
-wait-and-get-secret:
+wait-and-get-secret: $(KUBECTL)
./hack/ensure-env-variables.sh CLUSTER_NAME
# Wait for the kubeconfig to become available.
rm -f $(WORKER_CLUSTER_KUBECONFIG)
@@ -193,21 +183,29 @@ wait-and-get-secret:
./hack/get-kubeconfig-of-workload-cluster.sh
${TIMEOUT} --foreground 15m bash -c "while ! $(KUBECTL) --kubeconfig=$(WORKER_CLUSTER_KUBECONFIG) get nodes | grep control-plane; do sleep 1; done"
-install-cilium-in-wl-cluster: $(HELM)
+install-cilium-in-wl-cluster:
# Deploy cilium
- $(HELM) repo add cilium https://helm.cilium.io/
- $(HELM) repo update cilium
- KUBECONFIG=$(WORKER_CLUSTER_KUBECONFIG) $(HELM) upgrade --install cilium cilium/cilium \
+ helm repo add cilium https://helm.cilium.io/
+ helm repo update cilium
+ KUBECONFIG=$(WORKER_CLUSTER_KUBECONFIG) helm upgrade --install cilium cilium/cilium \
--namespace kube-system \
-f templates/cilium/cilium.yaml
+
install-ccm-in-wl-cluster:
- $(HELM) repo add syself https://charts.syself.com
- $(HELM) repo update syself
- KUBECONFIG=$(WORKER_CLUSTER_KUBECONFIG) $(HELM) upgrade --install ccm syself/ccm-hetzner --version 1.1.10 \
+ifeq ($(BUILD_IN_CONTAINER),true)
+ docker run --rm \
+ -v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
+ -v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
+ $(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
+else
+ helm repo add syself https://charts.syself.com
+ helm repo update syself
+ KUBECONFIG=$(WORKER_CLUSTER_KUBECONFIG) helm upgrade --install ccm syself/ccm-hetzner --version 2.0.1 \
--namespace kube-system \
--set privateNetwork.enabled=$(PRIVATE_NETWORK)
@echo 'run "kubectl --kubeconfig=$(WORKER_CLUSTER_KUBECONFIG) ..." to work with the new target cluster'
+endif
add-ssh-pub-key:
./hack/ensure-env-variables.sh HCLOUD_TOKEN SSH_KEY SSH_KEY_NAME
@@ -243,6 +241,7 @@ create-workload-cluster-hcloud-network: env-vars-for-wl-cluster $(KUSTOMIZE) $(E
$(MAKE) install-cilium-in-wl-cluster
$(MAKE) install-ccm-in-wl-cluster PRIVATE_NETWORK=true
+# Use that, if you want to test hcloud control-planes, hcloud worker and bm worker.
create-workload-cluster-hetzner-hcloud-control-plane: env-vars-for-wl-cluster $(KUSTOMIZE) $(ENVSUBST) ## Creates a workload-cluster.
# Create workload Cluster.
./hack/ensure-env-variables.sh HCLOUD_TOKEN HETZNER_ROBOT_USER HETZNER_ROBOT_PASSWORD HETZNER_SSH_PRIV_PATH HETZNER_SSH_PUB_PATH SSH_KEY_NAME
@@ -570,6 +569,17 @@ verify-manifests:
verify-container-images: ## Verify container images
trivy image -q --exit-code 1 --ignore-unfixed --severity MEDIUM,HIGH,CRITICAL $(IMAGE_PREFIX)/$(INFRA_SHORT):latest
+.PHONY: verify-generated-files
+verify-generated-files: ## Verify geneated files in git repo
+ifeq ($(BUILD_IN_CONTAINER),true)
+ docker run --rm \
+ -v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
+ -v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
+ $(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
+else
+ ./hack/verify-generated-files.sh
+endif
+
##@ Generate
############
# Generate #
@@ -581,7 +591,7 @@ generate-boilerplate: ## Generates missing boilerplates
# support go modules
generate-modules: ## Generates missing go modules
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -599,8 +609,17 @@ generate-modules-ci: generate-modules
exit 1; \
fi
-generate-manifests: $(CONTROLLER_GEN) ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
- $(CONTROLLER_GEN) \
+generate-manifests: ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
+ifeq ($(BUILD_IN_CONTAINER),true)
+ docker run --rm \
+ -v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
+ -v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
+ $(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
+else
+ # Ensure that these old binaries are not longer used. We use
+ # these from the builder-image now.
+ rm -f ./hack/tools/bin/controller-gen ./hack/tools/bin/helm
+ controller-gen \
paths=./api/... \
paths=./controllers/... \
crd:crdVersions=v1 \
@@ -608,11 +627,19 @@ generate-manifests: $(CONTROLLER_GEN) ## Generate WebhookConfiguration, ClusterR
output:crd:dir=./config/crd/bases \
output:webhook:dir=./config/webhook \
webhook
+endif
-generate-go-deepcopy: $(CONTROLLER_GEN) ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
- $(CONTROLLER_GEN) \
+generate-go-deepcopy: ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+ifeq ($(BUILD_IN_CONTAINER),true)
+ docker run --rm \
+ -v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
+ -v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
+ $(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
+else
+ controller-gen \
object:headerFile="./hack/boilerplate/boilerplate.generatego.txt" \
paths="./api/..."
+endif
generate-api-ci: generate-manifests generate-go-deepcopy
@if ! (git diff --exit-code ); then \
@@ -635,7 +662,7 @@ cluster-templates: $(KUSTOMIZE)
.PHONY: format-golang
format-golang: ## Format the Go codebase and run auto-fixers if supported by the linter.
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -652,7 +679,7 @@ format-starlark: ## Format the Starlark codebase
.PHONY: format-yaml
format-yaml: ## Lint YAML files
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -668,7 +695,7 @@ endif
.PHONY: lint-golang
lint-golang: ## Lint Golang codebase
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -681,7 +708,7 @@ endif
.PHONY: lint-golang-ci
lint-golang-ci:
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -694,7 +721,7 @@ endif
.PHONY: lint-yaml
lint-yaml: ## Lint YAML files
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -706,7 +733,7 @@ endif
.PHONY: lint-yaml-ci
lint-yaml-ci:
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -719,7 +746,7 @@ DOCKERFILES=$(shell find . -not \( -path ./hack -prune \) -not \( -path ./vendor
.PHONY: lint-dockerfile
lint-dockerfile: ## Lint Dockerfiles
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
@@ -730,7 +757,7 @@ endif
lint-links: ## Link Checker
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
else
@@ -751,13 +778,12 @@ format: format-starlark format-golang format-yaml ## Format Codebase
.PHONY: generate-mocks
generate-mocks: ## Generate Mocks
ifeq ($(BUILD_IN_CONTAINER),true)
- docker run --rm -t -i \
+ docker run --rm \
-v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
-v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
$(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION) $@;
else
- cd pkg/services/baremetal/client; go run github.com/vektra/mockery/v2@v2.40.2
- cd pkg/services/hcloud/client; go run github.com/vektra/mockery/v2@v2.40.2 --all
+ go run github.com/vektra/mockery/v2@v2.53.4
endif
.PHONY: generate
@@ -804,3 +830,10 @@ create-hetzner-installimage-tgz:
@echo "============= ↓↓↓↓↓ Now update the version number here ↓↓↓↓↓ ============="
@git ls-files | xargs grep -P 'hetzner-installimage.*v\d+\.\d+' || true
@echo "↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑"
+
+builder-image-shell: ## Start an interactive shell in the builder image.
+ docker run --rm -t -i \
+ --entrypoint bash \
+ -v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
+ -v $(shell pwd):/src/cluster-api-provider-$(INFRA_PROVIDER)$(MOUNT_FLAGS) \
+ $(BUILDER_IMAGE):$(BUILDER_IMAGE_VERSION)
diff --git a/README.md b/README.md
index ed8521b76..432c4a79c 100644
--- a/README.md
+++ b/README.md
@@ -71,20 +71,19 @@ In addition to the pure creation and operation of Kubernetes clusters, this prov
This provider's versions are compatible with the following versions of Cluster API:
-| | CAPI `v1beta1` (`v1.7.x`) | CAPI `v1beta1` (`v1.8.x`) |
-| ------------------------------------ | ------------------------- | ------------------------- |
-| Hetzner Provider `v1.0.0-beta.34-43` | ✅ | ❌ |
-| Hetzner Provider `v1.0.0` | ✅ | ✅ |
-| Hetzner Provider `v1.0.1` | ✅ | ✅ |
+| | CAPI `v1beta1` (`v1.8.x`) | CAPI `v1beta1` (`v1.9.x`) | CAPI `v1beta1` (`v1.10.x`) |
+| ------------------------------------ | ------------------------- | ------------------------- | -------------------------- |
+| Hetzner Provider `v1.0.x` | ✅ | ✅ | ✅ |
+
This provider's versions can install and manage the following versions of Kubernetes:
| | Hetzner Provider `v1.0.x` |
| ----------------- | ------------------------- |
-| Kubernetes 1.28.x | ✅ |
-| Kubernetes 1.29.x | ✅ |
-| Kubernetes 1.30.x | ✅ |
| Kubernetes 1.31.x | ✅ |
+| Kubernetes 1.32.x | ✅ |
+| Kubernetes 1.33.x | ✅ |
+| Kubernetes 1.34.x | ❔ |
Test status:
diff --git a/api/v1beta1/conditions_const.go b/api/v1beta1/conditions_const.go
index 5d4aeee40..9b5111199 100644
--- a/api/v1beta1/conditions_const.go
+++ b/api/v1beta1/conditions_const.go
@@ -233,3 +233,8 @@ const (
// DeprecatedRateLimitExceededCondition reports whether the rate limit has been reached.
DeprecatedRateLimitExceededCondition clusterv1.ConditionType = "RateLimitExceeded"
)
+
+const (
+ // RebootSucceededCondition indicates that the machine got rebooted successfully.
+ RebootSucceededCondition clusterv1.ConditionType = "RebootSucceeded"
+)
diff --git a/api/v1beta1/hcloudmachine_types.go b/api/v1beta1/hcloudmachine_types.go
index 8825f48ed..25ab0b10e 100644
--- a/api/v1beta1/hcloudmachine_types.go
+++ b/api/v1beta1/hcloudmachine_types.go
@@ -20,7 +20,7 @@ import (
"github.com/hetznercloud/hcloud-go/v2/hcloud"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- capierrors "sigs.k8s.io/cluster-api/errors"
+ capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck // we will handle that, when we update to capi v1.11
)
const (
@@ -39,14 +39,50 @@ type HCloudMachineSpec struct {
// +optional
ProviderID *string `json:"providerID,omitempty"`
- // Type is the HCloud Machine Type for this machine. It defines the desired server type of server in Hetzner's Cloud API. Example: cpx11.
- // +kubebuilder:validation:Enum=cpx11;cx21;cpx21;cx31;cpx31;cx41;cpx41;cx51;cpx51;ccx11;ccx12;ccx13;ccx21;ccx22;ccx23;ccx31;ccx32;ccx33;ccx41;ccx42;ccx43;ccx51;ccx52;ccx53;ccx62;ccx63;cax11;cax21;cax31;cax41;cx22;cx32;cx42;cx52
+ // Type is the HCloud Machine Type for this machine. It defines the desired server type of
+ // server in Hetzner's Cloud API. You can use the hcloud CLI to get server names (`hcloud
+ // server-type list`) or on https://www.hetzner.com/cloud
+ //
+ // The types follow this pattern: cxNV (shared, cheap), cpxNV (shared, performance), ccxNV
+ // (dedicated), caxNV (ARM)
+ //
+ // N is a number, and V is the version of this machine type. Example: cpx32.
+ //
+ // The list of valid machine types gets changed by Hetzner from time to time. CAPH no longer
+ // validates this string. It is up to you to use a valid type. Not all types are available in all
+ // locations.
Type HCloudMachineType `json:"type"`
// ImageName is the reference to the Machine Image from which to create the machine instance.
// It can reference an image uploaded to Hetzner API in two ways: either directly as the name of an image or as the label of an image.
// +kubebuilder:validation:MinLength=1
- ImageName string `json:"imageName"`
+ // +kubebuilder:validation:Optional
+ // +optional
+ ImageName string `json:"imageName,omitempty"`
+
+ // ImageURL gets used for installing custom node images. If that field is set, the controller
+ // boots a new HCloud machine into rescue mode. Then the script provided by
+ // --hcloud-image-url-command (which you need to provide to the controller binary) will be
+ // copied into the rescue system and executed.
+ //
+ // The controller uses url.ParseRequestURI (Go function) to validate the URL.
+ //
+ // It is up to the script to provision the disk of the hcloud machine accordingly. The process
+ // is considered successful if the last line in the output contains
+ // IMAGE_URL_DONE. If the script terminates with a different last line, then
+ // the process is considered to have failed.
+ //
+ // A Kubernetes event will be created in both (success, failure) cases containing the output
+ // (stdout and stderr) of the script. If the script takes longer than 7 minutes, the
+ // controller cancels the provisioning.
+ //
+ // Docs: https://syself.com/docs/caph/developers/image-url-command
+ //
+ // ImageURL is mutually exclusive to "ImageName".
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Optional
+ // +optional
+ ImageURL string `json:"imageURL,omitempty"`
// SSHKeys define machine-specific SSH keys and override cluster-wide SSH keys.
// +optional
@@ -96,6 +132,38 @@ type HCloudMachineStatus struct {
// Conditions define the current service state of the HCloudMachine.
// +optional
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+
+ // BootState indicates the current state during provisioning.
+ //
+ // If Spec.ImageName is set the states will be:
+ // 1. BootingToRealOS
+ // 2. OperatingSystemRunning
+ //
+ // If Spec.ImageURL is set the states will be:
+ // 1. Initializing
+ // 2. EnablingRescue
+ // 3. BootingToRescue
+ // 4. RunningImageCommand
+ // 5. BootingToRealOS
+ // 6. OperatingSystemRunning
+
+ // +optional
+ BootState HCloudBootState `json:"bootState"`
+
+ // BootStateSince is the timestamp of the last change to BootState. It is used to timeout
+ // provisioning if a state takes too long.
+ // +optional
+ BootStateSince metav1.Time `json:"bootStateSince,omitzero"`
+
+ // ExternalIDs contains temporary data during the provisioning process
+ ExternalIDs HCloudMachineStatusExternalIDs `json:"externalIDs,omitempty"`
+}
+
+// HCloudMachineStatusExternalIDs holds temporary data during the provisioning process.
+type HCloudMachineStatusExternalIDs struct {
+ // ActionIDEnableRescueSystem is the hcloud API Action result of EnableRescueSystem.
+ // +optional
+ ActionIDEnableRescueSystem int64 `json:"actionIdEnableRescueSystem,omitzero"`
}
// HCloudMachine is the Schema for the hcloudmachines API.
@@ -118,11 +186,6 @@ type HCloudMachine struct {
Status HCloudMachineStatus `json:"status,omitempty"`
}
-// HCloudMachineSpec returns a DeepCopy.
-func (r *HCloudMachine) HCloudMachineSpec() *HCloudMachineSpec {
- return r.Spec.DeepCopy()
-}
-
// GetConditions returns the observations of the operational state of the HCloudMachine resource.
func (r *HCloudMachine) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
@@ -133,6 +196,15 @@ func (r *HCloudMachine) SetConditions(conditions clusterv1.Conditions) {
r.Status.Conditions = conditions
}
+// SetBootState sets Status.BootStates and updates Status.BootStateSince.
+func (r *HCloudMachine) SetBootState(bootState HCloudBootState) {
+ if r.Status.BootState == bootState {
+ return
+ }
+ r.Status.BootState = bootState
+ r.Status.BootStateSince = metav1.Now()
+}
+
//+kubebuilder:object:root=true
// HCloudMachineList contains a list of HCloudMachine.
diff --git a/api/v1beta1/hcloudmachine_validation.go b/api/v1beta1/hcloudmachine_validation.go
index 81515ef45..988a28414 100644
--- a/api/v1beta1/hcloudmachine_validation.go
+++ b/api/v1beta1/hcloudmachine_validation.go
@@ -17,12 +17,13 @@ limitations under the License.
package v1beta1
import (
+ "net/url"
"reflect"
"k8s.io/apimachinery/pkg/util/validation/field"
)
-func validateHCloudMachineSpec(oldSpec, newSpec HCloudMachineSpec) field.ErrorList {
+func validateHCloudMachineSpecUpdate(oldSpec, newSpec HCloudMachineSpec) field.ErrorList {
var allErrs field.ErrorList
// Type is immutable
if !reflect.DeepEqual(oldSpec.Type, newSpec.Type) {
@@ -38,6 +39,13 @@ func validateHCloudMachineSpec(oldSpec, newSpec HCloudMachineSpec) field.ErrorLi
)
}
+ // ImageURL is immutable
+ if !reflect.DeepEqual(oldSpec.ImageURL, newSpec.ImageURL) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "imageURL"), newSpec.ImageURL, "field is immutable"),
+ )
+ }
+
// SSHKeys is immutable
if !reflect.DeepEqual(oldSpec.SSHKeys, newSpec.SSHKeys) {
allErrs = append(allErrs,
@@ -52,5 +60,30 @@ func validateHCloudMachineSpec(oldSpec, newSpec HCloudMachineSpec) field.ErrorLi
)
}
+ allErrs = append(allErrs, validateHCloudMachineSpec(newSpec)...)
+
+ return allErrs
+}
+
+func validateHCloudMachineSpec(spec HCloudMachineSpec) field.ErrorList {
+ var allErrs field.ErrorList
+ if spec.ImageName != "" && spec.ImageURL != "" {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "imageName"), spec.ImageName, "imageName and imageURL are mutually exclusive"))
+ }
+
+ if spec.ImageName == "" && spec.ImageURL == "" {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "imageName"), spec.ImageName, "imageName and imageURL empty. One of these attributes must be set"))
+ }
+
+ if spec.ImageURL != "" {
+ _, err := url.ParseRequestURI(spec.ImageURL)
+ if err != nil {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "imageURL"), spec.ImageURL, err.Error()))
+ }
+ }
+
return allErrs
}
diff --git a/api/v1beta1/hcloudmachine_validation_test.go b/api/v1beta1/hcloudmachine_validation_test.go
index 3d05c1486..193c96b1f 100644
--- a/api/v1beta1/hcloudmachine_validation_test.go
+++ b/api/v1beta1/hcloudmachine_validation_test.go
@@ -17,9 +17,11 @@ limitations under the License.
package v1beta1
import (
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/validation/field"
)
@@ -28,7 +30,7 @@ type args struct {
newSpec HCloudMachineSpec
}
-func TestValidateHCloudMachineSpec(t *testing.T) {
+func TestValidateHCloudMachineSpecUpdate(t *testing.T) {
tests := []struct {
name string
args args
@@ -38,10 +40,12 @@ func TestValidateHCloudMachineSpec(t *testing.T) {
name: "Immutable Type",
args: args{
oldSpec: HCloudMachineSpec{
- Type: "cpx11",
+ ImageName: "ubuntu-24.04",
+ Type: "cpx11",
},
newSpec: HCloudMachineSpec{
- Type: "cx21",
+ ImageName: "ubuntu-24.04",
+ Type: "cx21",
},
},
want: field.Invalid(field.NewPath("spec", "type"), "cx21", "field is immutable"),
@@ -58,10 +62,23 @@ func TestValidateHCloudMachineSpec(t *testing.T) {
},
want: field.Invalid(field.NewPath("spec", "imageName"), "centos-7", "field is immutable"),
},
+ {
+ name: "Immutable ImageURL",
+ args: args{
+ oldSpec: HCloudMachineSpec{
+ ImageURL: "oci://ghcr.io/example/foo:v1",
+ },
+ newSpec: HCloudMachineSpec{
+ ImageURL: "oci://ghcr.io/example/foo:v2",
+ },
+ },
+ want: field.Invalid(field.NewPath("spec", "imageURL"), "oci://ghcr.io/example/foo:v2", "field is immutable"),
+ },
{
name: "Immutable SSHKeys",
args: args{
oldSpec: HCloudMachineSpec{
+ ImageName: "ubuntu-24.04",
SSHKeys: []SSHKey{
{
Name: "ssh-key-1",
@@ -70,6 +87,7 @@ func TestValidateHCloudMachineSpec(t *testing.T) {
},
},
newSpec: HCloudMachineSpec{
+ ImageName: "ubuntu-24.04",
SSHKeys: []SSHKey{
{
Name: "ssh-key-1",
@@ -97,9 +115,11 @@ func TestValidateHCloudMachineSpec(t *testing.T) {
name: "Immutable PlacementGroupName",
args: args{
oldSpec: HCloudMachineSpec{
+ ImageName: "ubuntu-24.04",
PlacementGroupName: createPlacementGroupName("placement-group-1"),
},
newSpec: HCloudMachineSpec{
+ ImageName: "ubuntu-24.04",
PlacementGroupName: createPlacementGroupName("placement-group-2"),
},
},
@@ -124,24 +144,21 @@ func TestValidateHCloudMachineSpec(t *testing.T) {
want: nil,
},
}
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got := validateHCloudMachineSpec(tt.args.oldSpec, tt.args.newSpec)
+ got := validateHCloudMachineSpecUpdate(tt.args.oldSpec, tt.args.newSpec)
- if len(got) == 0 {
+ if tt.want == nil {
assert.Empty(t, got)
+ return
}
- if len(got) > 1 {
- t.Errorf("got length: %d greater than 1", len(got))
- }
+ assert.Equal(t, len(got), 1, "got length: %d greater than 1: %+v", len(got), got)
- // assert if length of got is 1
- if len(got) == 1 {
- assert.Equal(t, tt.want.Type, got[0].Type)
- assert.Equal(t, tt.want.Field, got[0].Field)
- assert.Equal(t, tt.want.Detail, got[0].Detail)
- }
+ assert.Equal(t, tt.want.Type, got[0].Type)
+ assert.Equal(t, tt.want.Field, got[0].Field)
+ assert.Equal(t, tt.want.Detail, got[0].Detail)
})
}
}
@@ -149,3 +166,24 @@ func TestValidateHCloudMachineSpec(t *testing.T) {
func createPlacementGroupName(name string) *string {
return &name
}
+
+func TestValidateHCloudMachineSpec(t *testing.T) {
+ allErrs := validateHCloudMachineSpec(HCloudMachineSpec{
+ ImageURL: "not-a-valid-url",
+ })
+ require.Equal(t, `spec.imageURL: Invalid value: "not-a-valid-url": parse "not-a-valid-url": invalid URI for request`, errorsToString(allErrs))
+
+ allErrs = validateHCloudMachineSpec(HCloudMachineSpec{
+ ImageName: "foo-name",
+ ImageURL: "oci://ghcr.io/example/foo:v1",
+ })
+ require.Equal(t, `spec.imageName: Invalid value: "foo-name": imageName and imageURL are mutually exclusive`, errorsToString(allErrs))
+}
+
+func errorsToString(allErrs field.ErrorList) string {
+ s := make([]string, 0, len(allErrs))
+ for _, err := range allErrs {
+ s = append(s, err.Error())
+ }
+ return strings.Join(s, "\n")
+}
diff --git a/api/v1beta1/hcloudmachine_webhook.go b/api/v1beta1/hcloudmachine_webhook.go
index 7c816398a..7acf7fadf 100644
--- a/api/v1beta1/hcloudmachine_webhook.go
+++ b/api/v1beta1/hcloudmachine_webhook.go
@@ -17,11 +17,11 @@ limitations under the License.
package v1beta1
import (
+ "context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -29,13 +29,18 @@ import (
"github.com/syself/cluster-api-provider-hetzner/pkg/utils"
)
+type hcloudMachineWebhook struct{}
+
// log is for logging in this package.
var hcloudmachinelog = utils.GetDefaultLogger("info").WithName("hcloudmachine-resource")
// SetupWebhookWithManager initializes webhook manager for HCloudMachine.
func (r *HCloudMachine) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hcloudMachineWebhook)
return ctrl.NewWebhookManagedBy(mgr).
For(r).
+ WithDefaulter(w).
+ WithValidator(w).
Complete()
}
@@ -48,46 +53,60 @@ func (r *HCloudMachineList) SetupWebhookWithManager(mgr ctrl.Manager) error {
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hcloudmachine,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hcloudmachines,verbs=create;update,versions=v1beta1,name=mutation.hcloudmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Defaulter = &HCloudMachine{}
+var _ webhook.CustomDefaulter = &hcloudMachineWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (r *HCloudMachine) Default() {
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hcloudMachineWebhook) Default(_ context.Context, obj runtime.Object) error {
+ r, ok := obj.(*HCloudMachine)
+ if !ok {
+ return fmt.Errorf("expected an HCloudMachine object but got %T", r)
+ }
if r.Spec.PublicNetwork == nil {
r.Spec.PublicNetwork = &PublicNetworkSpec{
EnableIPv4: true,
EnableIPv6: true,
}
}
+ return nil
}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hcloudmachine,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hcloudmachines,verbs=create;update,versions=v1beta1,name=validation.hcloudmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Validator = &HCloudMachine{}
+var _ webhook.CustomValidator = &hcloudMachineWebhook{}
+
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudMachineWebhook) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ r, ok := obj.(*HCloudMachine)
+ if !ok {
+ return nil, fmt.Errorf("expected an HCloudMachine object but got %T", r)
+ }
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudMachine) ValidateCreate() (admission.Warnings, error) {
hcloudmachinelog.V(1).Info("validate create", "name", r.Name)
- var allErrs field.ErrorList
+
+ allErrs := validateHCloudMachineSpec(r.Spec)
return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudMachineWebhook) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+ r, ok := newObj.(*HCloudMachine)
+ if !ok {
+ return nil, fmt.Errorf("expected an HCloudMachine object but got %T", r)
+ }
hcloudmachinelog.V(1).Info("validate update", "name", r.Name)
- oldM, ok := old.(*HCloudMachine)
+ oldM, ok := oldObj.(*HCloudMachine)
if !ok {
- return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an HCloudMachine but got a %T", old))
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an HCloudMachine but got a %T", oldObj))
}
- allErrs := validateHCloudMachineSpec(oldM.Spec, r.Spec)
+ allErrs := validateHCloudMachineSpecUpdate(oldM.Spec, r.Spec)
return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudMachine) ValidateDelete() (admission.Warnings, error) {
- hcloudmachinelog.V(1).Info("validate delete", "name", r.Name)
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (r *hcloudMachineWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hcloudmachinetemplate_webhook.go b/api/v1beta1/hcloudmachinetemplate_webhook.go
index a616aef11..b51753992 100644
--- a/api/v1beta1/hcloudmachinetemplate_webhook.go
+++ b/api/v1beta1/hcloudmachinetemplate_webhook.go
@@ -30,29 +30,39 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
-// SetupWebhookWithManager initializes webhook manager for HetznerMachineTemplate.
-func (r *HCloudMachineTemplateWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
+// SetupWebhookWithManager initializes webhook manager for HCloudMachineTemplate.
+func (r *HCloudMachineTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hcloudMachineTemplateWebhook)
return ctrl.NewWebhookManagedBy(mgr).
- For(&HCloudMachineTemplate{}).
- WithValidator(r).
+ For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
// HCloudMachineTemplateWebhook implements a custom validation webhook for HCloudMachineTemplate.
// +kubebuilder:object:generate=false
-type HCloudMachineTemplateWebhook struct{}
+type hcloudMachineTemplateWebhook struct{}
+
+// Default implements admission.CustomDefaulter.
+func (*hcloudMachineTemplateWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
+}
// +kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hcloudmachinetemplate,mutating=false,sideEffects=None,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hcloudmachinetemplates,verbs=create;update,versions=v1beta1,name=validation.hcloudmachinetemplate.infrastructure.x-k8s.io,admissionReviewVersions=v1;v1beta1
-var _ webhook.CustomValidator = &HCloudMachineTemplateWebhook{}
+var (
+ _ webhook.CustomValidator = &hcloudMachineTemplateWebhook{}
+ _ webhook.CustomDefaulter = &hcloudMachineTemplateWebhook{}
+)
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudMachineTemplateWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudMachineTemplateWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw runtime.Object, newRaw runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw runtime.Object, newRaw runtime.Object) (admission.Warnings, error) {
newHCloudMachineTemplate, ok := newRaw.(*HCloudMachineTemplate)
if !ok {
return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a HCloudMachineTemplate but got a %T", newRaw))
@@ -74,7 +84,7 @@ func (r *HCloudMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRa
return nil, aggregateObjErrors(newHCloudMachineTemplate.GroupVersionKind().GroupKind(), newHCloudMachineTemplate.Name, allErrs)
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudMachineTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudMachineTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hcloudremediation_webhook.go b/api/v1beta1/hcloudremediation_webhook.go
index 91adabbf1..3653ab50d 100644
--- a/api/v1beta1/hcloudremediation_webhook.go
+++ b/api/v1beta1/hcloudremediation_webhook.go
@@ -17,42 +17,50 @@ limitations under the License.
package v1beta1
import (
+ "context"
+
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
+type hcloudRemediationWebhook struct{}
+
// SetupWebhookWithManager initializes webhook manager for HCloudRemediation.
func (r *HCloudRemediation) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hcloudRemediationWebhook)
return ctrl.NewWebhookManagedBy(mgr).
For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hcloudremediation,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hcloudremediations,verbs=create;update,versions=v1beta1,name=mutation.hcloudremediation.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Defaulter = &HCloudRemediation{}
+var _ webhook.CustomDefaulter = &hcloudRemediationWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (r *HCloudRemediation) Default() {
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hcloudRemediationWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hcloudremediation,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hcloudremediations,verbs=create;update,versions=v1beta1,name=validation.hcloudremediation.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Validator = &HCloudRemediation{}
+var _ webhook.CustomValidator = &hcloudRemediationWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudRemediation) ValidateCreate() (admission.Warnings, error) {
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudRemediationWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudRemediation) ValidateUpdate(runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudRemediationWebhook) ValidateUpdate(_ context.Context, _ runtime.Object, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudRemediation) ValidateDelete() (admission.Warnings, error) {
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudRemediationWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hcloudremediationtemplate_webhook.go b/api/v1beta1/hcloudremediationtemplate_webhook.go
index a0921568e..b5e31f747 100644
--- a/api/v1beta1/hcloudremediationtemplate_webhook.go
+++ b/api/v1beta1/hcloudremediationtemplate_webhook.go
@@ -17,42 +17,50 @@ limitations under the License.
package v1beta1
import (
+ "context"
+
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
+type hcloudRemediationTemplateWebhook struct{}
+
// SetupWebhookWithManager initializes webhook manager for HCloudRemediationTemplate.
func (r *HCloudRemediationTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hcloudRemediationTemplateWebhook)
return ctrl.NewWebhookManagedBy(mgr).
For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hcloudremediationtemplate,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hcloudremediationtemplates,verbs=create;update,versions=v1beta1,name=mhcloudremediationtemplate.kb.io,admissionReviewVersions=v1
-var _ webhook.Defaulter = &HCloudRemediationTemplate{}
+var _ webhook.CustomDefaulter = &hcloudRemediationTemplateWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (r *HCloudRemediationTemplate) Default() {
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hcloudRemediationTemplateWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hcloudremediationtemplate,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hcloudremediationtemplates,verbs=create;update,versions=v1beta1,name=vhcloudremediationtemplate.kb.io,admissionReviewVersions=v1
-var _ webhook.Validator = &HCloudRemediationTemplate{}
+var _ webhook.CustomValidator = &hcloudRemediationTemplateWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudRemediationTemplate) ValidateCreate() (admission.Warnings, error) {
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudRemediationTemplateWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudRemediationTemplate) ValidateUpdate(runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudRemediationTemplateWebhook) ValidateUpdate(_ context.Context, _ runtime.Object, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HCloudRemediationTemplate) ValidateDelete() (admission.Warnings, error) {
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hcloudRemediationTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hetznerbaremetalhost_types.go b/api/v1beta1/hetznerbaremetalhost_types.go
index bd74c0f60..95c40ab17 100644
--- a/api/v1beta1/hetznerbaremetalhost_types.go
+++ b/api/v1beta1/hetznerbaremetalhost_types.go
@@ -189,6 +189,7 @@ const (
// RebootTypeSoftware defines the software reboot. "Send CTRL+ALT+DEL to the server".
RebootTypeSoftware RebootType = "sw"
// RebootTypeHardware defines the hardware reboot. "Execute an automatic hardware reset".
+ // The RebootTypeHardware is supported by all servers.
RebootTypeHardware RebootType = "hw"
// RebootTypeManual defines the manual reboot. "Order a manual power cycle".
RebootTypeManual RebootType = "man"
@@ -310,6 +311,22 @@ type ControllerGeneratedStatus struct {
// Conditions define the current service state of the HetznerBareMetalHost.
// +optional
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+
+ // ExternalIDs contains values from external systems.
+ // +optional
+ ExternalIDs ExternalIDs `json:"externalIDs,omitzero"`
+}
+
+// ExternalIDs contains values from external systems.
+type ExternalIDs struct {
+ // RebootAnnotationNodeBootID reflects the BootID of the Node resource in the workload-cluster.
+ // Only set when the machine gets rebooted.
+ // +optional
+ RebootAnnotationNodeBootID string `json:"rebootAnnotationNodeBootID,omitempty"`
+
+ // RebootAnnotationSince indicates when the reboot via Annotation started.
+ // +optional
+ RebootAnnotationSince metav1.Time `json:"rebootAnnotationSince,omitzero"`
}
// GetIPAddress returns the IPv6 if set, otherwise the IPv4.
diff --git a/api/v1beta1/hetznerbaremetalhost_webhook.go b/api/v1beta1/hetznerbaremetalhost_webhook.go
index b6379682a..bfb87d1c1 100644
--- a/api/v1beta1/hetznerbaremetalhost_webhook.go
+++ b/api/v1beta1/hetznerbaremetalhost_webhook.go
@@ -29,36 +29,39 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
-// HetznerBareMetalHostWebhook implements validating and defaulting webhook for HetznerBareMetalHost.
+// hetznerBareMetalHostWebhook implements validating and defaulting webhook for HetznerBareMetalHost.
// +k8s:deepcopy-gen=false
-type HetznerBareMetalHostWebhook struct {
+type hetznerBareMetalHostWebhook struct {
c client.Client
}
// SetupWebhookWithManager initializes webhook manager for HetznerBareMetalHost.
-func (hw *HetznerBareMetalHostWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
- hw.c = mgr.GetClient()
+func (host *HetznerBareMetalHost) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hetznerBareMetalHostWebhook)
+ w.c = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
- For(&HetznerBareMetalHost{}).
- WithValidator(hw).
+ For(host).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalhost,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalhosts,verbs=create;update,versions=v1beta1,name=mutation.hetznerbaremetalhost.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Defaulter = &HetznerBareMetalHost{}
+var _ webhook.CustomDefaulter = &hetznerBareMetalHostWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (host *HetznerBareMetalHost) Default() {
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (hw *hetznerBareMetalHostWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalhost,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalhosts,verbs=create;update,versions=v1beta1,name=validation.hetznerbaremetalhost.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.CustomValidator = &HetznerBareMetalHostWebhook{}
+var _ webhook.CustomValidator = &hetznerBareMetalHostWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (hw *HetznerBareMetalHostWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (hw *hetznerBareMetalHostWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
host, ok := (obj).(*HetznerBareMetalHost)
if !ok {
return admission.Warnings{}, apierrors.NewBadRequest(fmt.Sprintf("expected HetznerBareMetalHost, but got %T", host))
@@ -82,8 +85,8 @@ func (hw *HetznerBareMetalHostWebhook) ValidateCreate(ctx context.Context, obj r
return nil, aggregateObjErrors(hetznerBareMetalHostList.GroupVersionKind().GroupKind(), host.Name, allErrs)
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (hw *HetznerBareMetalHostWebhook) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (hw *hetznerBareMetalHostWebhook) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
oldHost, ok := oldObj.(*HetznerBareMetalHost)
if !ok {
return admission.Warnings{}, apierrors.NewBadRequest(fmt.Sprintf("expected an ClusterStack but got a %T", oldObj))
@@ -105,7 +108,7 @@ func (hw *HetznerBareMetalHostWebhook) ValidateUpdate(_ context.Context, oldObj,
return nil, aggregateObjErrors(newHost.GroupVersionKind().GroupKind(), newHost.Name, allErrs)
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (hw *HetznerBareMetalHostWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (hw *hetznerBareMetalHostWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hetznerbaremetalmachine_types.go b/api/v1beta1/hetznerbaremetalmachine_types.go
index 66e5eff3f..a671c9806 100644
--- a/api/v1beta1/hetznerbaremetalmachine_types.go
+++ b/api/v1beta1/hetznerbaremetalmachine_types.go
@@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/selection"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- capierrors "sigs.k8s.io/cluster-api/errors"
+ capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck // we will handle that, when we update to capi v1.11
)
const (
@@ -121,8 +121,8 @@ type SSHSpec struct {
// +optional
PortAfterInstallImage int `json:"portAfterInstallImage"`
- // PortAfterCloudInit specifies the port that has to be used to connect to the machine
- // by reaching the server via SSH after the successful completion of cloud init.
+ // PortAfterCloudInit is deprecated. Since PR Install Cloud-Init-Data via post-install.sh #1407 this field is not functional.
+ // Deprecated: This field is not used anymore.
// +optional
PortAfterCloudInit int `json:"portAfterCloudInit"`
}
@@ -187,6 +187,11 @@ type Image struct {
// URL defines the remote URL for downloading a tar, tar.gz, tar.bz, tar.bz2, tar.xz, tgz, tbz, txz image.
URL string `json:"url,omitempty"`
+ // UseCustomImageURLCommand makes the controller use the command provided by `--baremetal-image-url-command` instead of installimage.
+ // Docs: https://syself.com/docs/caph/developers/image-url-command
+ // +optional
+ UseCustomImageURLCommand bool `json:"useCustomImageURLCommand"`
+
// Name defines the archive name after download. This has to be a valid name for Installimage.
Name string `json:"name,omitempty"`
@@ -197,6 +202,9 @@ type Image struct {
// GetDetails returns the path of the image and whether the image has to be downloaded.
func (image Image) GetDetails() (imagePath string, needsDownload bool, errorMessage string) {
// If image is set, then the URL is also set and we have to download a remote file
+ if image.UseCustomImageURLCommand {
+ return "", false, "internal error: image.UseCustomImageURLCommand is active. Method GetDetails() should be used for the traditional way (without image-url-command)."
+ }
switch {
case image.Name != "" && image.URL != "":
suffix, err := GetImageSuffix(image.URL)
@@ -340,19 +348,19 @@ type HetznerBareMetalMachine struct {
}
// GetConditions returns the observations of the operational state of the HetznerBareMetalMachine resource.
-func (bmMachine *HetznerBareMetalMachine) GetConditions() clusterv1.Conditions {
- return bmMachine.Status.Conditions
+func (hbmm *HetznerBareMetalMachine) GetConditions() clusterv1.Conditions {
+ return hbmm.Status.Conditions
}
// SetConditions sets the underlying service state of the HetznerBareMetalMachine to the predescribed clusterv1.Conditions.
-func (bmMachine *HetznerBareMetalMachine) SetConditions(conditions clusterv1.Conditions) {
- bmMachine.Status.Conditions = conditions
+func (hbmm *HetznerBareMetalMachine) SetConditions(conditions clusterv1.Conditions) {
+ hbmm.Status.Conditions = conditions
}
// SetFailure sets a failure reason and message.
-func (bmMachine *HetznerBareMetalMachine) SetFailure(reason capierrors.MachineStatusError, message string) {
- bmMachine.Status.FailureReason = &reason
- bmMachine.Status.FailureMessage = &message
+func (hbmm *HetznerBareMetalMachine) SetFailure(reason capierrors.MachineStatusError, message string) {
+ hbmm.Status.FailureReason = &reason
+ hbmm.Status.FailureMessage = &message
}
// GetImageSuffix tests whether the suffix is known and outputs it if yes. Otherwise it returns an error.
@@ -379,8 +387,8 @@ func GetImageSuffix(url string) (string, error) {
}
// HasHostAnnotation checks whether the annotation that references a host exists.
-func (bmMachine *HetznerBareMetalMachine) HasHostAnnotation() bool {
- annotations := bmMachine.GetAnnotations()
+func (hbmm *HetznerBareMetalMachine) HasHostAnnotation() bool {
+ annotations := hbmm.GetAnnotations()
if annotations == nil {
return false
}
diff --git a/api/v1beta1/hetznerbaremetalmachine_types_test.go b/api/v1beta1/hetznerbaremetalmachine_types_test.go
index a83d49439..b00934ea2 100644
--- a/api/v1beta1/hetznerbaremetalmachine_types_test.go
+++ b/api/v1beta1/hetznerbaremetalmachine_types_test.go
@@ -23,7 +23,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
- capierrors "sigs.k8s.io/cluster-api/errors"
+ capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck // we will handle that, when we update to capi v1.11
)
var _ = Describe("Test Image.GetDetails", func() {
diff --git a/api/v1beta1/hetznerbaremetalmachine_validation_test.go b/api/v1beta1/hetznerbaremetalmachine_validation_test.go
index 1f233be12..9c145031a 100644
--- a/api/v1beta1/hetznerbaremetalmachine_validation_test.go
+++ b/api/v1beta1/hetznerbaremetalmachine_validation_test.go
@@ -267,7 +267,6 @@ func TestValidateHetznerBareMetalMachineSpecUpdate(t *testing.T) {
},
},
PortAfterInstallImage: 22,
- PortAfterCloudInit: 22,
},
},
newSpec: HetznerBareMetalMachineSpec{
@@ -281,7 +280,6 @@ func TestValidateHetznerBareMetalMachineSpecUpdate(t *testing.T) {
},
},
PortAfterInstallImage: 2222,
- PortAfterCloudInit: 2222,
},
},
},
@@ -295,7 +293,6 @@ func TestValidateHetznerBareMetalMachineSpecUpdate(t *testing.T) {
},
},
PortAfterInstallImage: 2222,
- PortAfterCloudInit: 2222,
}, "sshSpec immutable"),
},
{
@@ -363,7 +360,6 @@ func TestValidateHetznerBareMetalMachineSpecUpdate(t *testing.T) {
},
},
PortAfterInstallImage: 22,
- PortAfterCloudInit: 22,
},
HostSelector: HostSelector{
MatchLabels: map[string]string{
@@ -395,7 +391,6 @@ func TestValidateHetznerBareMetalMachineSpecUpdate(t *testing.T) {
},
},
PortAfterInstallImage: 22,
- PortAfterCloudInit: 22,
},
HostSelector: HostSelector{
MatchLabels: map[string]string{
diff --git a/api/v1beta1/hetznerbaremetalmachine_webhook.go b/api/v1beta1/hetznerbaremetalmachine_webhook.go
index 1fc4e5fbb..42054aa74 100644
--- a/api/v1beta1/hetznerbaremetalmachine_webhook.go
+++ b/api/v1beta1/hetznerbaremetalmachine_webhook.go
@@ -17,51 +17,67 @@ limitations under the License.
package v1beta1
import (
+ "context"
+ "fmt"
+
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
+type hetznerBareMetalMachineWebhook struct{}
+
// SetupWebhookWithManager initializes webhook manager for HetznerBareMetalMachine.
-func (bmMachine *HetznerBareMetalMachine) SetupWebhookWithManager(mgr ctrl.Manager) error {
+func (hbmm *HetznerBareMetalMachine) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hetznerBareMetalMachineWebhook)
return ctrl.NewWebhookManagedBy(mgr).
- For(bmMachine).
+ For(hbmm).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalmachine,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalmachines,verbs=create;update,versions=v1beta1,name=mutation.hetznerbaremetalmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Defaulter = &HetznerBareMetalMachine{}
+var _ webhook.CustomDefaulter = &hetznerBareMetalMachineWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (bmMachine *HetznerBareMetalMachine) Default() {}
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hetznerBareMetalMachineWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
+}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalmachine,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalmachines,verbs=create;update,versions=v1beta1,name=validation.hetznerbaremetalmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Validator = &HetznerBareMetalMachine{}
+var _ webhook.CustomValidator = &hetznerBareMetalMachineWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (bmMachine *HetznerBareMetalMachine) ValidateCreate() (admission.Warnings, error) {
- if bmMachine.Spec.SSHSpec.PortAfterCloudInit == 0 {
- bmMachine.Spec.SSHSpec.PortAfterCloudInit = bmMachine.Spec.SSHSpec.PortAfterInstallImage
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalMachineWebhook) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ r, ok := obj.(*HetznerBareMetalMachine)
+ if !ok {
+ return nil, fmt.Errorf("expected an HetznerBareMetalMachine object but got %T", r)
}
- allErrs := validateHetznerBareMetalMachineSpecCreate(bmMachine.Spec)
+ allErrs := validateHetznerBareMetalMachineSpecCreate(r.Spec)
- return nil, aggregateObjErrors(bmMachine.GroupVersionKind().GroupKind(), bmMachine.Name, allErrs)
+ return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (bmMachine *HetznerBareMetalMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
- oldHetznerBareMetalMachine := old.(*HetznerBareMetalMachine)
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalMachineWebhook) ValidateUpdate(_ context.Context, oldObj runtime.Object, newObj runtime.Object) (admission.Warnings, error) {
+ r, ok := newObj.(*HetznerBareMetalMachine)
+ if !ok {
+ return nil, fmt.Errorf("expected an HetznerBareMetalMachine object but got %T", r)
+ }
+
+ oldHetznerBareMetalMachine := oldObj.(*HetznerBareMetalMachine)
- allErrs := validateHetznerBareMetalMachineSpecUpdate(oldHetznerBareMetalMachine.Spec, bmMachine.Spec)
+ allErrs := validateHetznerBareMetalMachineSpecUpdate(oldHetznerBareMetalMachine.Spec, r.Spec)
- return nil, aggregateObjErrors(bmMachine.GroupVersionKind().GroupKind(), bmMachine.Name, allErrs)
+ return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (bmMachine *HetznerBareMetalMachine) ValidateDelete() (admission.Warnings, error) {
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalMachineWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hetznerbaremetalmachinetemplate_webhook.go b/api/v1beta1/hetznerbaremetalmachinetemplate_webhook.go
index e8c4ec7e1..b3ae3121b 100644
--- a/api/v1beta1/hetznerbaremetalmachinetemplate_webhook.go
+++ b/api/v1beta1/hetznerbaremetalmachinetemplate_webhook.go
@@ -31,10 +31,12 @@ import (
)
// SetupWebhookWithManager initializes webhook manager for HetznerBareMetalMachineTemplate.
-func (r *HetznerBareMetalMachineTemplateWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
+func (r *HetznerBareMetalMachineTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(HetznerBareMetalMachineTemplateWebhook)
return ctrl.NewWebhookManagedBy(mgr).
- For(&HetznerBareMetalMachineTemplate{}).
- WithValidator(r).
+ For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
@@ -42,27 +44,24 @@ func (r *HetznerBareMetalMachineTemplateWebhook) SetupWebhookWithManager(mgr ctr
// +kubebuilder:object:generate=false
type HetznerBareMetalMachineTemplateWebhook struct{}
+// Default implements admission.CustomDefaulter.
+func (*HetznerBareMetalMachineTemplateWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
+}
+
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalmachinetemplate,mutating=false,sideEffects=None,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalmachinetemplates,verbs=create;update,versions=v1beta1,name=validation.hetznerbaremetalmachinetemplate.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
var _ webhook.CustomValidator = &HetznerBareMetalMachineTemplateWebhook{}
+var _ webhook.CustomDefaulter = &HetznerBareMetalMachineTemplateWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerBareMetalMachineTemplateWebhook) ValidateCreate(_ context.Context, raw runtime.Object) (admission.Warnings, error) {
- hbmmt, ok := raw.(*HetznerBareMetalMachineTemplate)
- if !ok {
- return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a HetznerBareMetalMachineTemplate but got a %T", raw))
- }
-
- if hbmmt.Spec.Template.Spec.SSHSpec.PortAfterCloudInit == 0 {
- hbmmt.Spec.Template.Spec.SSHSpec.PortAfterCloudInit = hbmmt.Spec.Template.Spec.SSHSpec.PortAfterInstallImage
- }
-
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (r *HetznerBareMetalMachineTemplateWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
// TODO: Cannot validate it because ClusterClass applies empty template objects
// allErrs := validateHetznerBareMetalMachineSpecCreate(hbmmt.Spec.Template.Spec)
return nil, nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
func (r *HetznerBareMetalMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw runtime.Object, newRaw runtime.Object) (admission.Warnings, error) {
newHetznerBareMetalMachineTemplate, ok := newRaw.(*HetznerBareMetalMachineTemplate)
if !ok {
@@ -86,7 +85,7 @@ func (r *HetznerBareMetalMachineTemplateWebhook) ValidateUpdate(ctx context.Cont
return nil, aggregateObjErrors(newHetznerBareMetalMachineTemplate.GroupVersionKind().GroupKind(), newHetznerBareMetalMachineTemplate.Name, allErrs)
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
func (r *HetznerBareMetalMachineTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hetznerbaremetalremediation_webhook.go b/api/v1beta1/hetznerbaremetalremediation_webhook.go
index cc34f5b93..6bc39ef9b 100644
--- a/api/v1beta1/hetznerbaremetalremediation_webhook.go
+++ b/api/v1beta1/hetznerbaremetalremediation_webhook.go
@@ -17,42 +17,50 @@ limitations under the License.
package v1beta1
import (
+ "context"
+
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
+type hetznerBareMetalRemediationWebhook struct{}
+
// SetupWebhookWithManager initializes webhook manager for HetznerBareMetalRemediation.
func (r *HetznerBareMetalRemediation) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hetznerBareMetalRemediationWebhook)
return ctrl.NewWebhookManagedBy(mgr).
For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalremediation,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalremediations,verbs=create;update,versions=v1beta1,name=mutation.hetznerbaremetalremediation.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Defaulter = &HetznerBareMetalRemediation{}
+var _ webhook.CustomDefaulter = &hetznerBareMetalRemediationWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediation) Default() {
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalremediation,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalremediations,verbs=create;update,versions=v1beta1,name=validation.hetznerbaremetalremediation.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Validator = &HetznerBareMetalRemediation{}
+var _ webhook.CustomValidator = &hetznerBareMetalRemediationWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediation) ValidateCreate() (admission.Warnings, error) {
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediation) ValidateUpdate(runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationWebhook) ValidateUpdate(_ context.Context, _, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediation) ValidateDelete() (admission.Warnings, error) {
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hetznerbaremetalremediationtemplate_webhook.go b/api/v1beta1/hetznerbaremetalremediationtemplate_webhook.go
index abf56cf5b..e797a5f50 100644
--- a/api/v1beta1/hetznerbaremetalremediationtemplate_webhook.go
+++ b/api/v1beta1/hetznerbaremetalremediationtemplate_webhook.go
@@ -17,42 +17,50 @@ limitations under the License.
package v1beta1
import (
+ "context"
+
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
+type hetznerBareMetalRemediationTemplateWebhook struct{}
+
// SetupWebhookWithManager initializes webhook manager for HetznerBareMetalRemediationTemplate.
func (r *HetznerBareMetalRemediationTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hetznerBareMetalRemediationTemplateWebhook)
return ctrl.NewWebhookManagedBy(mgr).
For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalremediationtemplate,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalremediationtemplates,verbs=create;update,versions=v1beta1,name=mhetznerbaremetalremediationtemplate.kb.io,admissionReviewVersions=v1
-var _ webhook.Defaulter = &HetznerBareMetalRemediationTemplate{}
+var _ webhook.CustomDefaulter = &hetznerBareMetalRemediationTemplateWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediationTemplate) Default() {
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationTemplateWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerbaremetalremediationtemplate,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalremediationtemplates,verbs=create;update,versions=v1beta1,name=vhetznerbaremetalremediationtemplate.kb.io,admissionReviewVersions=v1
-var _ webhook.Validator = &HetznerBareMetalRemediationTemplate{}
+var _ webhook.CustomValidator = &hetznerBareMetalRemediationTemplateWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediationTemplate) ValidateCreate() (admission.Warnings, error) {
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationTemplateWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediationTemplate) ValidateUpdate(runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationTemplateWebhook) ValidateUpdate(_ context.Context, _, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerBareMetalRemediationTemplate) ValidateDelete() (admission.Warnings, error) {
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerBareMetalRemediationTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hetznercluster_webhook.go b/api/v1beta1/hetznercluster_webhook.go
index 72c0746e2..5f3655c4d 100644
--- a/api/v1beta1/hetznercluster_webhook.go
+++ b/api/v1beta1/hetznercluster_webhook.go
@@ -17,6 +17,7 @@ limitations under the License.
package v1beta1
import (
+ "context"
"fmt"
"reflect"
@@ -30,6 +31,8 @@ import (
"github.com/syself/cluster-api-provider-hetzner/pkg/utils"
)
+type hetznerClusterWebhook struct{}
+
// log is for logging in this package.
var hetznerclusterlog = utils.GetDefaultLogger("info").WithName("hetznercluster-resource")
@@ -44,8 +47,11 @@ var regionNetworkZoneMap = map[string]string{
// SetupWebhookWithManager initializes webhook manager for HetznerCluster.
func (r *HetznerCluster) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hetznerClusterWebhook)
return ctrl.NewWebhookManagedBy(mgr).
For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
@@ -60,17 +66,23 @@ func (r *HetznerClusterList) SetupWebhookWithManager(mgr ctrl.Manager) error {
//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hetznercluster,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerclusters,verbs=create;update,versions=v1beta1,name=mutation.hetznercluster.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Defaulter = &HetznerCluster{}
+var _ webhook.CustomDefaulter = &hetznerClusterWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (r *HetznerCluster) Default() {}
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hetznerClusterWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
+}
//+kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hetznercluster,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerclusters,verbs=create;update,versions=v1beta1,name=validation.hetznercluster.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Validator = &HetznerCluster{}
+var _ webhook.CustomValidator = &hetznerClusterWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerCluster) ValidateCreate() (admission.Warnings, error) {
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerClusterWebhook) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ r, ok := obj.(*HetznerCluster)
+ if !ok {
+ return nil, fmt.Errorf("expected an HetznerCluster object but got %T", r)
+ }
hetznerclusterlog.V(1).Info("validate create", "name", r.Name)
var allErrs field.ErrorList
@@ -151,14 +163,18 @@ func isNetworkZoneSameForAllRegions(regions []Region, defaultNetworkZone *string
return nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerClusterWebhook) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+ r, ok := newObj.(*HetznerCluster)
+ if !ok {
+ return nil, fmt.Errorf("expected an HetznerCluster object but got %T", r)
+ }
hetznerclusterlog.V(1).Info("validate update", "name", r.Name)
var allErrs field.ErrorList
- oldC, ok := old.(*HetznerCluster)
+ oldC, ok := oldObj.(*HetznerCluster)
if !ok {
- return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an HetznerCluster but got a %T", old))
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an HetznerCluster but got a %T", oldObj))
}
// Network settings are immutable
@@ -220,8 +236,7 @@ func (r *HetznerCluster) validateHetznerSecretKey() *field.Error {
return nil
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerCluster) ValidateDelete() (admission.Warnings, error) {
- hetznerclusterlog.V(1).Info("validate delete", "name", r.Name)
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerClusterWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/hetznerclustertemplate_webhook.go b/api/v1beta1/hetznerclustertemplate_webhook.go
index 21351b636..fe4c15e9a 100644
--- a/api/v1beta1/hetznerclustertemplate_webhook.go
+++ b/api/v1beta1/hetznerclustertemplate_webhook.go
@@ -17,6 +17,7 @@ limitations under the License.
package v1beta1
import (
+ "context"
"fmt"
"reflect"
@@ -29,40 +30,48 @@ import (
"github.com/syself/cluster-api-provider-hetzner/pkg/utils"
)
+type hetznerClusterTemplateWebhook struct{}
+
// log is for logging in this package.
var hetznerclustertemplatelog = utils.GetDefaultLogger("info").WithName("hetznerclustertemplate-resource")
// SetupWebhookWithManager initializes webhook manager for HetznerClusterTemplate.
func (r *HetznerClusterTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ w := new(hetznerClusterTemplateWebhook)
return ctrl.NewWebhookManagedBy(mgr).
For(r).
+ WithValidator(w).
+ WithDefaulter(w).
Complete()
}
// +kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerclustertemplate,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerclustertemplates,verbs=create;update,versions=v1beta1,name=mutation.hetznerclustertemplate.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Defaulter = &HetznerClusterTemplate{}
+var _ webhook.CustomDefaulter = &hetznerClusterTemplateWebhook{}
-// Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (r *HetznerClusterTemplate) Default() {
- hetznerclustertemplatelog.V(1).Info("default", "name", r.Name)
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type.
+func (*hetznerClusterTemplateWebhook) Default(_ context.Context, _ runtime.Object) error {
+ return nil
}
// +kubebuilder:webhook:path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-hetznerclustertemplate,mutating=false,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=hetznerclustertemplates,verbs=create;update,versions=v1beta1,name=validation.hetznerclustertemplate.infrastructure.cluster.x-k8s.io,admissionReviewVersions={v1,v1beta1}
-var _ webhook.Validator = &HetznerClusterTemplate{}
+var _ webhook.CustomValidator = &hetznerClusterTemplateWebhook{}
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerClusterTemplate) ValidateCreate() (admission.Warnings, error) {
- hetznerclustertemplatelog.V(1).Info("validate create", "name", r.Name)
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerClusterTemplateWebhook) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerClusterTemplate) ValidateUpdate(oldRaw runtime.Object) (admission.Warnings, error) {
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerClusterTemplateWebhook) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+ r, ok := newObj.(*HetznerClusterTemplate)
+ if !ok {
+ return nil, fmt.Errorf("expected an HetznerClusterTemplate object but got %T", r)
+ }
hetznerclustertemplatelog.V(1).Info("validate update", "name", r.Name)
- old, ok := oldRaw.(*HetznerClusterTemplate)
+ old, ok := oldObj.(*HetznerClusterTemplate)
if !ok {
- return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an HetznerClusterTemplate but got a %T", oldRaw))
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an HetznerClusterTemplate but got a %T", oldObj))
}
if !reflect.DeepEqual(r.Spec, old.Spec) {
@@ -71,8 +80,7 @@ func (r *HetznerClusterTemplate) ValidateUpdate(oldRaw runtime.Object) (admissio
return nil, nil
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *HetznerClusterTemplate) ValidateDelete() (admission.Warnings, error) {
- hetznerclustertemplatelog.V(1).Info("validate delete", "name", r.Name)
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type.
+func (*hetznerClusterTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go
index f574a7942..b08c598a2 100644
--- a/api/v1beta1/types.go
+++ b/api/v1beta1/types.go
@@ -263,3 +263,33 @@ func (s *HCloudNetworkSpec) IsZero() bool {
}
return true
}
+
+// HCloudBootState defines the boot state of an HCloud server.
+type HCloudBootState string
+
+const (
+ // HCloudBootStateUnset is the initial state when the boot state has not been set yet.
+ HCloudBootStateUnset HCloudBootState = ""
+
+ // HCloudBootStateInitializing indicates that the controller waits for PreRescueOS.
+ // When it is available, then the rescue system gets enabled.
+ HCloudBootStateInitializing HCloudBootState = "Initializing"
+
+ // HCloudBootStateEnablingRescue indicates that the controller waits for the rescue system to be enabled. Then the server gets booted into the rescue system.
+ HCloudBootStateEnablingRescue HCloudBootState = "EnablingRescue"
+
+ // HCloudBootStateBootingToRescue indicates that the controller
+ // waits for the rescue system to be reachable. Then it starts the image-url-command.
+ HCloudBootStateBootingToRescue HCloudBootState = "BootingToRescue"
+
+ // HCloudBootStateRunningImageCommand indicates the controller waits for the
+ // image-url-command, and then switches BootState to BootingToRealOS (no additional reboot gets
+ // done).
+ HCloudBootStateRunningImageCommand HCloudBootState = "RunningImageCommand"
+
+ // HCloudBootStateBootingToRealOS indicates that the server is booting the operating system.
+ HCloudBootStateBootingToRealOS HCloudBootState = "BootingToRealOS"
+
+ // HCloudBootStateOperatingSystemRunning indicates that the server is successfully running.
+ HCloudBootStateOperatingSystemRunning HCloudBootState = "OperatingSystemRunning"
+)
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index fa34fdc2d..ab12dee41 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -104,6 +104,7 @@ func (in *ControllerGeneratedStatus) DeepCopyInto(out *ControllerGeneratedStatus
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ in.ExternalIDs.DeepCopyInto(&out.ExternalIDs)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerGeneratedStatus.
@@ -116,6 +117,22 @@ func (in *ControllerGeneratedStatus) DeepCopy() *ControllerGeneratedStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIDs) DeepCopyInto(out *ExternalIDs) {
+ *out = *in
+ in.RebootAnnotationSince.DeepCopyInto(&out.RebootAnnotationSince)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIDs.
+func (in *ExternalIDs) DeepCopy() *ExternalIDs {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalIDs)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HCloudMachine) DeepCopyInto(out *HCloudMachine) {
*out = *in
@@ -245,6 +262,8 @@ func (in *HCloudMachineStatus) DeepCopyInto(out *HCloudMachineStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ in.BootStateSince.DeepCopyInto(&out.BootStateSince)
+ out.ExternalIDs = in.ExternalIDs
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HCloudMachineStatus.
@@ -257,6 +276,21 @@ func (in *HCloudMachineStatus) DeepCopy() *HCloudMachineStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HCloudMachineStatusExternalIDs) DeepCopyInto(out *HCloudMachineStatusExternalIDs) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HCloudMachineStatusExternalIDs.
+func (in *HCloudMachineStatusExternalIDs) DeepCopy() *HCloudMachineStatusExternalIDs {
+ if in == nil {
+ return nil
+ }
+ out := new(HCloudMachineStatusExternalIDs)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HCloudMachineTemplate) DeepCopyInto(out *HCloudMachineTemplate) {
*out = *in
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachines.yaml
index c063cd9eb..b8767595c 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachines.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hcloudmachines.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -72,6 +72,29 @@ spec:
It can reference an image uploaded to Hetzner API in two ways: either directly as the name of an image or as the label of an image.
minLength: 1
type: string
+ imageURL:
+ description: |-
+ ImageURL gets used for installing custom node images. If that field is set, the controller
+ boots a new HCloud machine into rescue mode. Then the script provided by
+ --hcloud-image-url-command (which you need to provide to the controller binary) will be
+ copied into the rescue system and executed.
+
+ The controller uses url.ParseRequestURI (Go function) to validate the URL.
+
+ It is up to the script to provision the disk of the hcloud machine accordingly. The process
+ is considered successful if the last line in the output contains
+ IMAGE_URL_DONE. If the script terminates with a different last line, then
+ the process is considered to have failed.
+
+ A Kubernetes event will be created in both (success, failure) cases containing the output
+ (stdout and stderr) of the script. If the script takes longer than 7 minutes, the
+ controller cancels the provisioning.
+
+ Docs: https://syself.com/docs/caph/developers/image-url-command
+
+ ImageURL is mutually exclusive to "ImageName".
+ minLength: 1
+ type: string
placementGroupName:
description: PlacementGroupName defines the placement group of the
machine in HCloud API that must reference an existing placement
@@ -117,47 +140,21 @@ spec:
type: object
type: array
type:
- description: 'Type is the HCloud Machine Type for this machine. It
- defines the desired server type of server in Hetzner''s Cloud API.
- Example: cpx11.'
- enum:
- - cpx11
- - cx21
- - cpx21
- - cx31
- - cpx31
- - cx41
- - cpx41
- - cx51
- - cpx51
- - ccx11
- - ccx12
- - ccx13
- - ccx21
- - ccx22
- - ccx23
- - ccx31
- - ccx32
- - ccx33
- - ccx41
- - ccx42
- - ccx43
- - ccx51
- - ccx52
- - ccx53
- - ccx62
- - ccx63
- - cax11
- - cax21
- - cax31
- - cax41
- - cx22
- - cx32
- - cx42
- - cx52
+ description: |-
+ Type is the HCloud Machine Type for this machine. It defines the desired server type of
+ server in Hetzner's Cloud API. You can use the hcloud CLI to get server names (`hcloud
+ server-type list`) or on https://www.hetzner.com/cloud
+
+ The types follow this pattern: cxNV (shared, cheap), cpxNV (shared, performance), ccxNV
+ (dedicated), caxNV (ARM)
+
+ N is a number, and V is the version of this machine type. Example: cpx32.
+
+ The list of valid machine types gets changed by Hetzner from time to time. CAPH no longer
+ validates this string. It is up to you to use a valid type. Not all types are available in all
+ locations.
type: string
required:
- - imageName
- type
type: object
status:
@@ -170,17 +167,34 @@ spec:
address.
properties:
address:
- description: The machine address.
+ description: address is the machine address.
+ maxLength: 256
+ minLength: 1
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP,
- InternalIP, ExternalDNS or InternalDNS.
+ description: type is the machine address type, one of Hostname,
+ ExternalIP, InternalIP, ExternalDNS or InternalDNS.
+ enum:
+ - Hostname
+ - ExternalIP
+ - InternalIP
+ - ExternalDNS
+ - InternalDNS
type: string
required:
- address
- type
type: object
type: array
+ bootState:
+ description: HCloudBootState defines the boot state of an HCloud server.
+ type: string
+ bootStateSince:
+ description: |-
+ BootStateSince is the timestamp of the last change to BootState. It is used to timeout
+ provisioning if a state takes too long.
+ format: date-time
+ type: string
conditions:
description: Conditions define the current service state of the HCloudMachine.
items:
@@ -189,36 +203,43 @@ spec:
properties:
lastTransitionTime:
description: |-
- Last time the condition transitioned from one status to another.
+ lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
- A human readable message indicating details about the transition.
+ message is a human readable message indicating details about the transition.
This field may be empty.
+ maxLength: 10240
+ minLength: 1
type: string
reason:
description: |-
- The reason for the condition's last transition in CamelCase.
+ reason is the reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
- This field may not be empty.
+ This field may be empty.
+ maxLength: 256
+ minLength: 1
type: string
severity:
description: |-
- Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
+ maxLength: 32
type: string
status:
- description: Status of the condition, one of True, False, Unknown.
+ description: status of the condition, one of True, False, Unknown.
type: string
type:
description: |-
- Type of condition in CamelCase or in foo.example.com/CamelCase.
+ type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
+ maxLength: 256
+ minLength: 1
type: string
required:
- lastTransitionTime
@@ -226,6 +247,16 @@ spec:
- type
type: object
type: array
+ externalIDs:
+ description: ExternalIDs contains temporary data during the provisioning
+ process
+ properties:
+ actionIdEnableRescueSystem:
+ description: ActionIDEnableRescueSystem is the hcloud API Action
+ result of EnableRescueSystem.
+ format: int64
+ type: integer
+ type: object
failureMessage:
description: |-
FailureMessage will be set in the event that there is a terminal problem
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachinetemplates.yaml
index 9ee936e9b..0ab77daef 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachinetemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudmachinetemplates.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hcloudmachinetemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -74,7 +74,7 @@ spec:
additionalProperties:
type: string
description: |-
- Annotations is an unstructured key value map stored with a resource that may be
+ annotations is an unstructured key value map stored with a resource that may be
set by external tools to store and retrieve arbitrary metadata. They are not
queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations
@@ -83,7 +83,7 @@ spec:
additionalProperties:
type: string
description: |-
- Map of string keys and values that can be used to organize and categorize
+ labels is a map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services.
More info: http://kubernetes.io/docs/user-guide/labels
@@ -99,6 +99,29 @@ spec:
It can reference an image uploaded to Hetzner API in two ways: either directly as the name of an image or as the label of an image.
minLength: 1
type: string
+ imageURL:
+ description: |-
+ ImageURL gets used for installing custom node images. If that field is set, the controller
+ boots a new HCloud machine into rescue mode. Then the script provided by
+ --hcloud-image-url-command (which you need to provide to the controller binary) will be
+ copied into the rescue system and executed.
+
+ The controller uses url.ParseRequestURI (Go function) to validate the URL.
+
+ It is up to the script to provision the disk of the hcloud machine accordingly. The process
+ is considered successful if the last line in the output contains
+ IMAGE_URL_DONE. If the script terminates with a different last line, then
+ the process is considered to have failed.
+
+ A Kubernetes event will be created in both (success, failure) cases containing the output
+ (stdout and stderr) of the script. If the script takes longer than 7 minutes, the
+ controller cancels the provisioning.
+
+ Docs: https://syself.com/docs/caph/developers/image-url-command
+
+ ImageURL is mutually exclusive to "ImageName".
+ minLength: 1
+ type: string
placementGroupName:
description: PlacementGroupName defines the placement group
of the machine in HCloud API that must reference an existing
@@ -144,47 +167,21 @@ spec:
type: object
type: array
type:
- description: 'Type is the HCloud Machine Type for this machine.
- It defines the desired server type of server in Hetzner''s
- Cloud API. Example: cpx11.'
- enum:
- - cpx11
- - cx21
- - cpx21
- - cx31
- - cpx31
- - cx41
- - cpx41
- - cx51
- - cpx51
- - ccx11
- - ccx12
- - ccx13
- - ccx21
- - ccx22
- - ccx23
- - ccx31
- - ccx32
- - ccx33
- - ccx41
- - ccx42
- - ccx43
- - ccx51
- - ccx52
- - ccx53
- - ccx62
- - ccx63
- - cax11
- - cax21
- - cax31
- - cax41
- - cx22
- - cx32
- - cx42
- - cx52
+ description: |-
+ Type is the HCloud Machine Type for this machine. It defines the desired server type of
+ server in Hetzner's Cloud API. You can use the hcloud CLI to get server names (`hcloud
+ server-type list`) or on https://www.hetzner.com/cloud
+
+ The types follow this pattern: cxNV (shared, cheap), cpxNV (shared, performance), ccxNV
+ (dedicated), caxNV (ARM)
+
+ N is a number, and V is the version of this machine type. Example: cpx32.
+
+ The list of valid machine types gets changed by Hetzner from time to time. CAPH no longer
+ validates this string. It is up to you to use a valid type. Not all types are available in all
+ locations.
type: string
required:
- - imageName
- type
type: object
required:
@@ -217,36 +214,43 @@ spec:
properties:
lastTransitionTime:
description: |-
- Last time the condition transitioned from one status to another.
+ lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
- A human readable message indicating details about the transition.
+ message is a human readable message indicating details about the transition.
This field may be empty.
+ maxLength: 10240
+ minLength: 1
type: string
reason:
description: |-
- The reason for the condition's last transition in CamelCase.
+ reason is the reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
- This field may not be empty.
+ This field may be empty.
+ maxLength: 256
+ minLength: 1
type: string
severity:
description: |-
- Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
+ maxLength: 32
type: string
status:
- description: Status of the condition, one of True, False, Unknown.
+ description: status of the condition, one of True, False, Unknown.
type: string
type:
description: |-
- Type of condition in CamelCase or in foo.example.com/CamelCase.
+ type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
+ maxLength: 256
+ minLength: 1
type: string
required:
- lastTransitionTime
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediations.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediations.yaml
index 9b9e698a7..7e27cab22 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediations.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediations.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hcloudremediations.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -103,36 +103,43 @@ spec:
properties:
lastTransitionTime:
description: |-
- Last time the condition transitioned from one status to another.
+ lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
- A human readable message indicating details about the transition.
+ message is a human readable message indicating details about the transition.
This field may be empty.
+ maxLength: 10240
+ minLength: 1
type: string
reason:
description: |-
- The reason for the condition's last transition in CamelCase.
+ reason is the reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
- This field may not be empty.
+ This field may be empty.
+ maxLength: 256
+ minLength: 1
type: string
severity:
description: |-
- Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
+ maxLength: 32
type: string
status:
- description: Status of the condition, one of True, False, Unknown.
+ description: status of the condition, one of True, False, Unknown.
type: string
type:
description: |-
- Type of condition in CamelCase or in foo.example.com/CamelCase.
+ type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
+ maxLength: 256
+ minLength: 1
type: string
required:
- lastTransitionTime
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediationtemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediationtemplates.yaml
index 4c678459d..f33a484ca 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediationtemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hcloudremediationtemplates.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hcloudremediationtemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -109,37 +109,44 @@ spec:
properties:
lastTransitionTime:
description: |-
- Last time the condition transitioned from one status to another.
+ lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
- A human readable message indicating details about the transition.
+ message is a human readable message indicating details about the transition.
This field may be empty.
+ maxLength: 10240
+ minLength: 1
type: string
reason:
description: |-
- The reason for the condition's last transition in CamelCase.
+ reason is the reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
- This field may not be empty.
+ This field may be empty.
+ maxLength: 256
+ minLength: 1
type: string
severity:
description: |-
- Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
+ maxLength: 32
type: string
status:
- description: Status of the condition, one of True, False,
+ description: status of the condition, one of True, False,
Unknown.
type: string
type:
description: |-
- Type of condition in CamelCase or in foo.example.com/CamelCase.
+ type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
+ maxLength: 256
+ minLength: 1
type: string
required:
- lastTransitionTime
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalhosts.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalhosts.yaml
index 5317afa25..e347061c3 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalhosts.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalhosts.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hetznerbaremetalhosts.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -98,7 +98,6 @@ spec:
the event) or if no container name is specified "spec.containers[2]" (container with
index 2 in this pod). This syntax is chosen only to have some well-defined way of
referencing a part of an object.
- TODO: this design is not final and this field is subject to change in the future.
type: string
kind:
description: |-
@@ -180,37 +179,44 @@ spec:
properties:
lastTransitionTime:
description: |-
- Last time the condition transitioned from one status to another.
+ lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
- A human readable message indicating details about the transition.
+ message is a human readable message indicating details about the transition.
This field may be empty.
+ maxLength: 10240
+ minLength: 1
type: string
reason:
description: |-
- The reason for the condition's last transition in CamelCase.
+ reason is the reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
- This field may not be empty.
+ This field may be empty.
+ maxLength: 256
+ minLength: 1
type: string
severity:
description: |-
- Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
+ maxLength: 32
type: string
status:
- description: Status of the condition, one of True, False,
+ description: status of the condition, one of True, False,
Unknown.
type: string
type:
description: |-
- Type of condition in CamelCase or in foo.example.com/CamelCase.
+ type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
+ maxLength: 256
+ minLength: 1
type: string
required:
- lastTransitionTime
@@ -232,6 +238,20 @@ spec:
ErrorType indicates the type of failure encountered when the
OperationalStatus is OperationalStatusError.
type: string
+ externalIDs:
+ description: ExternalIDs contains values from external systems.
+ properties:
+ rebootAnnotationNodeBootID:
+ description: |-
+ RebootAnnotationNodeBootID reflects the BootID of the Node resource in the workload-cluster.
+ Only set when the machine gets rebooted.
+ type: string
+ rebootAnnotationSince:
+ description: RebootAnnotationSince indicates when the reboot
+ via Annotation started.
+ format: date-time
+ type: string
+ type: object
hardwareDetails:
description: StatusHardwareDetails are automatically gathered
and should not be modified by the user.
@@ -376,6 +396,11 @@ spec:
a tar, tar.gz, tar.bz, tar.bz2, tar.xz, tgz, tbz, txz
image.
type: string
+ useCustomImageURLCommand:
+ description: |-
+ UseCustomImageURLCommand makes the controller use the command provided by `--baremetal-image-url-command` instead of installimage.
+ Docs: https://syself.com/docs/caph/developers/image-url-command
+ type: boolean
type: object
logicalVolumeDefinitions:
description: LVMDefinitions defines the logical volume definitions
@@ -499,8 +524,8 @@ spec:
properties:
portAfterCloudInit:
description: |-
- PortAfterCloudInit specifies the port that has to be used to connect to the machine
- by reaching the server via SSH after the successful completion of cloud init.
+ PortAfterCloudInit is deprecated. Since PR Install Cloud-Init-Data via post-install.sh #1407 this field is not functional.
+ Deprecated: This field is not used anymore.
type: integer
portAfterInstallImage:
default: 22
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachines.yaml
index c29e06e40..9d7cfe261 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachines.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hetznerbaremetalmachines.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -155,6 +155,11 @@ spec:
description: URL defines the remote URL for downloading a
tar, tar.gz, tar.bz, tar.bz2, tar.xz, tgz, tbz, txz image.
type: string
+ useCustomImageURLCommand:
+ description: |-
+ UseCustomImageURLCommand makes the controller use the command provided by `--baremetal-image-url-command` instead of installimage.
+ Docs: https://syself.com/docs/caph/developers/image-url-command
+ type: boolean
type: object
logicalVolumeDefinitions:
description: LVMDefinitions defines the logical volume definitions
@@ -258,8 +263,8 @@ spec:
properties:
portAfterCloudInit:
description: |-
- PortAfterCloudInit specifies the port that has to be used to connect to the machine
- by reaching the server via SSH after the successful completion of cloud init.
+ PortAfterCloudInit is deprecated. Since PR Install Cloud-Init-Data via post-install.sh #1407 this field is not functional.
+ Deprecated: This field is not used anymore.
type: integer
portAfterInstallImage:
default: 22
@@ -318,11 +323,19 @@ spec:
address.
properties:
address:
- description: The machine address.
+ description: address is the machine address.
+ maxLength: 256
+ minLength: 1
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP,
- InternalIP, ExternalDNS or InternalDNS.
+ description: type is the machine address type, one of Hostname,
+ ExternalIP, InternalIP, ExternalDNS or InternalDNS.
+ enum:
+ - Hostname
+ - ExternalIP
+ - InternalIP
+ - ExternalDNS
+ - InternalDNS
type: string
required:
- address
@@ -337,36 +350,43 @@ spec:
properties:
lastTransitionTime:
description: |-
- Last time the condition transitioned from one status to another.
+ lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
- A human readable message indicating details about the transition.
+ message is a human readable message indicating details about the transition.
This field may be empty.
+ maxLength: 10240
+ minLength: 1
type: string
reason:
description: |-
- The reason for the condition's last transition in CamelCase.
+ reason is the reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
- This field may not be empty.
+ This field may be empty.
+ maxLength: 256
+ minLength: 1
type: string
severity:
description: |-
- Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
+ maxLength: 32
type: string
status:
- description: Status of the condition, one of True, False, Unknown.
+ description: status of the condition, one of True, False, Unknown.
type: string
type:
description: |-
- Type of condition in CamelCase or in foo.example.com/CamelCase.
+ type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
+ maxLength: 256
+ minLength: 1
type: string
required:
- lastTransitionTime
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachinetemplates.yaml
index 9625e4c70..c00a8329f 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachinetemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalmachinetemplates.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hetznerbaremetalmachinetemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -142,6 +142,11 @@ spec:
a tar, tar.gz, tar.bz, tar.bz2, tar.xz, tgz, tbz,
txz image.
type: string
+ useCustomImageURLCommand:
+ description: |-
+ UseCustomImageURLCommand makes the controller use the command provided by `--baremetal-image-url-command` instead of installimage.
+ Docs: https://syself.com/docs/caph/developers/image-url-command
+ type: boolean
type: object
logicalVolumeDefinitions:
description: LVMDefinitions defines the logical volume
@@ -245,8 +250,8 @@ spec:
properties:
portAfterCloudInit:
description: |-
- PortAfterCloudInit specifies the port that has to be used to connect to the machine
- by reaching the server via SSH after the successful completion of cloud init.
+ PortAfterCloudInit is deprecated. Since PR Install Cloud-Init-Data via post-install.sh #1407 this field is not functional.
+ Deprecated: This field is not used anymore.
type: integer
portAfterInstallImage:
default: 22
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediations.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediations.yaml
index 8b8647e7d..ea9cd1a8e 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediations.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediations.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hetznerbaremetalremediations.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediationtemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediationtemplates.yaml
index ac99d5b3b..d2fdd8c84 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediationtemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerbaremetalremediationtemplates.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hetznerbaremetalremediationtemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclusters.yaml
index c49cb2063..852ed38bb 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclusters.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hetznerclusters.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -76,10 +76,11 @@ spec:
communicate with the control plane.
properties:
host:
- description: The hostname on which the API server is serving.
+ description: host is the hostname on which the API server is serving.
+ maxLength: 512
type: string
port:
- description: The port on which the API server is serving.
+ description: port is the port on which the API server is serving.
format: int32
type: integer
required:
@@ -351,36 +352,43 @@ spec:
properties:
lastTransitionTime:
description: |-
- Last time the condition transitioned from one status to another.
+ lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
- A human readable message indicating details about the transition.
+ message is a human readable message indicating details about the transition.
This field may be empty.
+ maxLength: 10240
+ minLength: 1
type: string
reason:
description: |-
- The reason for the condition's last transition in CamelCase.
+ reason is the reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
- This field may not be empty.
+ This field may be empty.
+ maxLength: 256
+ minLength: 1
type: string
severity:
description: |-
- Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
+ maxLength: 32
type: string
status:
- description: Status of the condition, one of True, False, Unknown.
+ description: status of the condition, one of True, False, Unknown.
type: string
type:
description: |-
- Type of condition in CamelCase or in foo.example.com/CamelCase.
+ type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
+ maxLength: 256
+ minLength: 1
type: string
required:
- lastTransitionTime
@@ -433,11 +441,11 @@ spec:
attributes:
additionalProperties:
type: string
- description: Attributes is a free form map of attributes an
+ description: attributes is a free form map of attributes an
infrastructure provider might use or require.
type: object
controlPlane:
- description: ControlPlane determines if this failure domain
+ description: controlPlane determines if this failure domain
is suitable for use by control plane machines.
type: boolean
type: object
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclustertemplates.yaml
index c2cc419f3..a4c1b20c7 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclustertemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_hetznerclustertemplates.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.18.0
name: hetznerclustertemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -52,26 +52,22 @@ spec:
ObjectMeta is metadata that all persisted resources must have, which includes all objects
users must create. This is a copy of customizable fields from metav1.ObjectMeta.
-
ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`,
which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases
and read-only fields which end up in the generated CRD validation, having it as a subset simplifies
the API and some issues that can impact user experience.
-
During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054)
for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs,
specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`.
The investigation showed that `controller-tools@v2` behaves differently than its previous version
when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.
-
In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta`
had validation properties, including for `creationTimestamp` (metav1.Time).
The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null`
which breaks validation because the field isn't marked as nullable.
-
In future versions, controller-tools@v2 might allow overriding the type and validation for embedded
types. When that happens, this hack should be revisited.
properties:
@@ -79,7 +75,7 @@ spec:
additionalProperties:
type: string
description: |-
- Annotations is an unstructured key value map stored with a resource that may be
+ annotations is an unstructured key value map stored with a resource that may be
set by external tools to store and retrieve arbitrary metadata. They are not
queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations
@@ -88,7 +84,7 @@ spec:
additionalProperties:
type: string
description: |-
- Map of string keys and values that can be used to organize and categorize
+ labels is a map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services.
More info: http://kubernetes.io/docs/user-guide/labels
@@ -102,10 +98,13 @@ spec:
used to communicate with the control plane.
properties:
host:
- description: The hostname on which the API server is serving.
+ description: host is the hostname on which the API server
+ is serving.
+ maxLength: 512
type: string
port:
- description: The port on which the API server is serving.
+ description: port is the port on which the API server
+ is serving.
format: int32
type: integer
required:
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 6147cd2dd..6058f145c 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -70,155 +70,11 @@ rules:
- infrastructure.cluster.x-k8s.io
resources:
- hcloudmachines
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hcloudmachines/finalizers
- verbs:
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hcloudmachines/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- hcloudmachinetemplates
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hcloudmachinetemplates/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- hcloudremediations
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hcloudremediations/finalizers
- verbs:
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hcloudremediations/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- hetznerbaremetalhosts
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hetznerbaremetalhosts/finalizers
- verbs:
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hetznerbaremetalhosts/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- hetznerbaremetalmachines
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hetznerbaremetalmachines/finalizers
- verbs:
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hetznerbaremetalmachines/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- hetznerbaremetalremediations
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hetznerbaremetalremediations/finalizers
- verbs:
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- - hetznerbaremetalremediations/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - infrastructure.cluster.x-k8s.io
- resources:
- hetznerclusters
verbs:
- create
@@ -231,12 +87,23 @@ rules:
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
+ - hcloudmachines/finalizers
+ - hcloudremediations/finalizers
+ - hetznerbaremetalhosts/finalizers
+ - hetznerbaremetalmachines/finalizers
+ - hetznerbaremetalremediations/finalizers
- hetznerclusters/finalizers
verbs:
- update
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
+ - hcloudmachines/status
+ - hcloudmachinetemplates/status
+ - hcloudremediations/status
+ - hetznerbaremetalhosts/status
+ - hetznerbaremetalmachines/status
+ - hetznerbaremetalremediations/status
- hetznerclusters/status
verbs:
- get
diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go
index e2919548d..54f351ef6 100644
--- a/controllers/controllers_suite_test.go
+++ b/controllers/controllers_suite_test.go
@@ -83,6 +83,7 @@ var _ = BeforeSuite(func() {
Client: testEnv.Manager.GetClient(),
APIReader: testEnv.Manager.GetAPIReader(),
HCloudClientFactory: testEnv.HCloudClientFactory,
+ SSHClientFactory: testEnv.BaremetalSSHClientFactory,
}).SetupWithManager(ctx, testEnv.Manager, controller.Options{})).To(Succeed())
Expect((&HCloudMachineTemplateReconciler{
@@ -95,7 +96,7 @@ var _ = BeforeSuite(func() {
Client: testEnv.Manager.GetClient(),
APIReader: testEnv.Manager.GetAPIReader(),
RobotClientFactory: testEnv.RobotClientFactory,
- SSHClientFactory: testEnv.SSHClientFactory,
+ SSHClientFactory: testEnv.BaremetalSSHClientFactory,
PreProvisionCommand: "dummy-pre-provision-command",
}).SetupWithManager(ctx, testEnv.Manager, controller.Options{})).To(Succeed())
@@ -285,7 +286,6 @@ func getDefaultHetznerBareMetalMachineSpec() infrav1.HetznerBareMetalMachineSpec
},
},
PortAfterInstallImage: 22,
- PortAfterCloudInit: 22,
},
}
}
diff --git a/controllers/csr_controller.go b/controllers/csr_controller.go
index 40a9df768..5e5cfd726 100644
--- a/controllers/csr_controller.go
+++ b/controllers/csr_controller.go
@@ -287,7 +287,7 @@ func (r *GuestCSRReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Mana
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&certificatesv1.CertificateSigningRequest{}).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
WithEventFilter(predicate.Funcs{
DeleteFunc: func(_ event.DeleteEvent) bool {
// We don't want to listen to delete events, as CSRs are deleted frequently without us having to do something
diff --git a/controllers/hcloudmachine_controller.go b/controllers/hcloudmachine_controller.go
index 7ea00ca49..c5ac5dea7 100644
--- a/controllers/hcloudmachine_controller.go
+++ b/controllers/hcloudmachine_controller.go
@@ -25,7 +25,9 @@ import (
"time"
"github.com/go-logr/logr"
+ "github.com/google/go-cmp/cmp"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
@@ -46,6 +48,7 @@ import (
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
"github.com/syself/cluster-api-provider-hetzner/pkg/scope"
secretutil "github.com/syself/cluster-api-provider-hetzner/pkg/secrets"
+ sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
hcloudclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client"
"github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/server"
)
@@ -56,7 +59,9 @@ type HCloudMachineReconciler struct {
RateLimitWaitTime time.Duration
APIReader client.Reader
HCloudClientFactory hcloudclient.Factory
+ SSHClientFactory sshclient.Factory
WatchFilterValue string
+ ImageURLCommand string
}
//+kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
@@ -138,13 +143,17 @@ func (r *HCloudMachineReconciler) Reconcile(ctx context.Context, req reconcile.R
HetznerSecret: hetznerSecret,
APIReader: r.APIReader,
},
- Machine: machine,
- HCloudMachine: hcloudMachine,
+ Machine: machine,
+ HCloudMachine: hcloudMachine,
+ SSHClientFactory: r.SSHClientFactory,
+ ImageURLCommand: r.ImageURLCommand,
})
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create scope: %+v", err)
}
+ initialHCloudMachine := hcloudMachine.DeepCopy()
+ startReconcile := time.Now()
// Always close the scope when exiting this function so we can persist any HCloudMachine changes.
defer func() {
if reterr != nil && errors.Is(reterr, hcloudclient.ErrUnauthorized) {
@@ -153,9 +162,65 @@ func (r *HCloudMachineReconciler) Reconcile(ctx context.Context, req reconcile.R
conditions.MarkTrue(hcloudMachine, infrav1.HCloudTokenAvailableCondition)
}
- if err := machineScope.Close(ctx); err != nil && reterr == nil {
+ // the Close() will use PatchHelper to store the changes.
+ if err := machineScope.Close(ctx); err != nil {
res = reconcile.Result{}
- reterr = err
+ reterr = errors.Join(reterr, err)
+ }
+
+ if !cmp.Equal(initialHCloudMachine, hcloudMachine) {
+ // The hcloudMachine was changed. Wait until the local cache contains the revision
+ // which was created by above machineScope.Close().
+ // We want to read our own writes.
+ err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (done bool, err error) {
+ // new resource, read from local cache
+ latest := &infrav1.HCloudMachine{}
+ getErr := r.Get(ctx, client.ObjectKeyFromObject(machineScope.HCloudMachine), latest)
+ if apierrors.IsNotFound(getErr) {
+ // the object was deleted. All is fine.
+ return true, nil
+ }
+ if getErr != nil {
+ return false, getErr
+ }
+ // When the ResourceVersion has changed, then it is very likely that the local
+ // cache has the new version.
+ return latest.ResourceVersion != hcloudMachine.ResourceVersion, nil
+ })
+ if err != nil {
+ log.Error(err, "cache sync failed after BootState change")
+ }
+ }
+
+ readyReason := conditions.GetReason(machineScope.HCloudMachine, clusterv1.ReadyCondition)
+ readyMessage := conditions.GetMessage(machineScope.HCloudMachine, clusterv1.ReadyCondition)
+
+ duration := time.Since(startReconcile)
+
+ if duration > 5*time.Second {
+ log.Info("Reconcile took too long",
+ "reconcileDuration", duration,
+ "res", res,
+ "reterr", reterr,
+ "oldState", initialHCloudMachine.Status.BootState,
+ "newState", machineScope.HCloudMachine.Status.BootState,
+ "readyReason", readyReason,
+ "readyMessage", readyMessage,
+ )
+ }
+
+ if initialHCloudMachine.Status.BootState != machineScope.HCloudMachine.Status.BootState {
+ startBootState := initialHCloudMachine.Status.BootStateSince
+ if startBootState.IsZero() {
+ startBootState = initialHCloudMachine.CreationTimestamp
+ }
+ log.Info("BootState changed",
+ "oldState", initialHCloudMachine.Status.BootState,
+ "newState", machineScope.HCloudMachine.Status.BootState,
+ "durationInState", machineScope.HCloudMachine.Status.BootStateSince.Time.Sub(startBootState.Time).Round(time.Second),
+ "readyReason", readyReason,
+ "readyMessage", readyMessage,
+ )
}
}()
@@ -168,6 +233,10 @@ func (r *HCloudMachineReconciler) Reconcile(ctx context.Context, req reconcile.R
return r.reconcileDelete(ctx, machineScope)
}
+ if hcloudMachine.Status.FailureReason != nil {
+ // This machine will be removed.
+ return reconcile.Result{}, nil
+ }
return r.reconcileNormal(ctx, machineScope)
}
@@ -224,7 +293,7 @@ func (r *HCloudMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl
err = ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.HCloudMachine{}).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue)).
WithEventFilter(IgnoreInsignificantHCloudMachineStatusUpdates(log)).
Watches(
&clusterv1.Machine{},
@@ -239,7 +308,7 @@ func (r *HCloudMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl
Watches(
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc),
- builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(log)),
+ builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log)),
).
Complete(r)
if err != nil {
@@ -460,6 +529,13 @@ func IgnoreInsignificantHCloudMachineStatusUpdates(logger logr.Logger) predicate
oldHCloudMachine.ResourceVersion = ""
newHCloudMachine.ResourceVersion = ""
+ // The ProviderID is set by the controller. Do not react if that changes.
+ // Otherwise the next Reconcile is likely to read outdated data, because
+ // the Status was not updated yet. PatchHelper updates three times in this order:
+ // Status.Conditions, Resource, Status.
+ oldHCloudMachine.Spec.ProviderID = nil
+ newHCloudMachine.Spec.ProviderID = nil
+
oldHCloudMachine.Status = infrav1.HCloudMachineStatus{}
newHCloudMachine.Status = infrav1.HCloudMachineStatus{}
diff --git a/controllers/hcloudmachine_controller_test.go b/controllers/hcloudmachine_controller_test.go
index 361210f84..c4b501cd1 100644
--- a/controllers/hcloudmachine_controller_test.go
+++ b/controllers/hcloudmachine_controller_test.go
@@ -365,7 +365,7 @@ var _ = Describe("HCloudMachineReconciler", func() {
}, timeout).Should(BeTrue())
})
- It("creates the HCloud machine in Hetzner 1", func() {
+ It("creates the HCloud machine in Hetzner 1 (flaky)", func() {
By("checking that no servers exist")
Eventually(func() bool {
@@ -433,6 +433,24 @@ var _ = Describe("HCloudMachineReconciler", func() {
Eventually(func() bool {
return isPresentAndTrue(key, hcloudMachine, infrav1.ServerAvailableCondition)
}, timeout, interval).Should(BeTrue())
+
+ By("checking if the BootState is now OperatingSystemRunning")
+ Eventually(func() bool {
+ if err = testEnv.Get(ctx, key, hcloudMachine); err != nil {
+ return false
+ }
+
+ return hcloudMachine.Status.BootState == infrav1.HCloudBootStateOperatingSystemRunning && !hcloudMachine.Status.BootStateSince.IsZero()
+ }, timeout, interval).Should(BeTrue())
+
+ By("checking if the ssh keys are set in the status")
+ Eventually(func() bool {
+ if err = testEnv.Get(ctx, key, hcloudMachine); err != nil {
+ return false
+ }
+
+ return len(hcloudMachine.Status.SSHKeys) == 1 && hcloudMachine.Status.SSHKeys[0].Name == "testsshkey"
+ }, timeout, interval).Should(BeTrue())
})
})
@@ -808,11 +826,6 @@ var _ = Describe("HCloudMachine validation", func() {
Expect(testEnv.Cleanup(ctx, testNs, hcloudMachine)).To(Succeed())
})
- It("should fail with wrong type", func() {
- hcloudMachine.Spec.Type = "wrong-type"
- Expect(testEnv.Create(ctx, hcloudMachine)).ToNot(Succeed())
- })
-
It("should fail without imageName", func() {
hcloudMachine.Spec.ImageName = ""
Expect(testEnv.Create(ctx, hcloudMachine)).ToNot(Succeed())
diff --git a/controllers/hcloudmachinetemplate_controller.go b/controllers/hcloudmachinetemplate_controller.go
index d2be61d6c..08a089b73 100644
--- a/controllers/hcloudmachinetemplate_controller.go
+++ b/controllers/hcloudmachinetemplate_controller.go
@@ -167,7 +167,7 @@ func (r *HCloudMachineTemplateReconciler) SetupWithManager(ctx context.Context,
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.HCloudMachineTemplate{}).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
Complete(r)
}
diff --git a/controllers/hcloudremediation_controller.go b/controllers/hcloudremediation_controller.go
index d6ccb5163..dc2af1734 100644
--- a/controllers/hcloudremediation_controller.go
+++ b/controllers/hcloudremediation_controller.go
@@ -164,9 +164,9 @@ func (r *HCloudRemediationReconciler) Reconcile(ctx context.Context, req reconci
patchOpts := []patch.Option{}
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
- if err := remediationScope.Close(ctx, patchOpts...); err != nil && reterr == nil {
+ if err := remediationScope.Close(ctx, patchOpts...); err != nil {
res = reconcile.Result{}
- reterr = err
+ reterr = errors.Join(reterr, err)
}
}()
@@ -201,6 +201,6 @@ func (r *HCloudRemediationReconciler) SetupWithManager(ctx context.Context, mgr
return ctrl.NewControllerManagedBy(mgr).
For(&infrav1.HCloudRemediation{}).
WithOptions(options).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
Complete(r)
}
diff --git a/controllers/hcloudremediation_controller_test.go b/controllers/hcloudremediation_controller_test.go
index 4a8fbad94..dc9cc8281 100644
--- a/controllers/hcloudremediation_controller_test.go
+++ b/controllers/hcloudremediation_controller_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package controllers
import (
+ "fmt"
"time"
"github.com/hetznercloud/hcloud-go/v2/hcloud"
@@ -205,14 +206,16 @@ var _ = Describe("HCloudRemediationReconciler", func() {
It("checks that no remediation is tried if HCloud server does not exist anymore", func() {
By("ensuring if hcloudMachine is provisioned")
- Eventually(func() bool {
+ Eventually(func() error {
if err := testEnv.Get(ctx, hcloudMachineKey, hcloudMachine); err != nil {
- return false
+ return err
}
- testEnv.GetLogger().Info("Status of the hcloudmachine", "status", hcloudMachine.Status)
- return hcloudMachine.Status.Ready
- }, timeout).Should(BeTrue())
+ if !hcloudMachine.Status.Ready {
+ return fmt.Errorf("hcloudMachine.Status.Ready is not true (yet)")
+ }
+ return nil
+ }, timeout).ShouldNot(HaveOccurred())
By("deleting the server associated with the hcloudMachine")
providerID, err := hcloudutil.ServerIDFromProviderID(hcloudMachine.Spec.ProviderID)
@@ -235,28 +238,60 @@ var _ = Describe("HCloudRemediationReconciler", func() {
})
It("checks that, under normal conditions, a reboot is carried out and retryCount and lastRemediated are set", func() {
+ // Wait until machine has a ProviderID
+ Eventually(func() error {
+ err := testEnv.Client.Get(ctx, hcloudMachineKey, hcloudMachine)
+ if err != nil {
+ return err
+ }
+ if hcloudMachine.Spec.ProviderID == nil {
+ return fmt.Errorf("hcloudMachine.Spec.ProviderID is still nil")
+ }
+ return nil
+ }).NotTo(HaveOccurred())
+
Expect(testEnv.Create(ctx, hcloudRemediation)).To(Succeed())
- Eventually(func() bool {
+ Eventually(func() error {
if err := testEnv.Get(ctx, hcloudRemediationkey, hcloudRemediation); err != nil {
- return false
+ return err
}
- return hcloudRemediation.Status.LastRemediated != nil && hcloudRemediation.Status.RetryCount == 1
- }, timeout).Should(BeTrue())
+ if hcloudRemediation.Status.LastRemediated == nil {
+ return fmt.Errorf("hcloudRemediation.Status.LastRemediated == nil")
+ }
+ if hcloudRemediation.Status.RetryCount != 1 {
+ return fmt.Errorf("hcloudRemediation.Status.RetryCount is %d", hcloudRemediation.Status.RetryCount)
+ }
+ return nil
+ }, timeout).ShouldNot(HaveOccurred())
})
It("checks if PhaseWaiting is set when retryLimit is reached", func() {
+ // Wait until machine has a ProviderID
+ Eventually(func() error {
+ err := testEnv.Client.Get(ctx, hcloudMachineKey, hcloudMachine)
+ if err != nil {
+ return err
+ }
+ if hcloudMachine.Spec.ProviderID == nil {
+ return fmt.Errorf("hcloudMachine.Spec.ProviderID is still nil")
+ }
+ return nil
+ }).NotTo(HaveOccurred())
+
+ hcloudRemediation.Status.RetryCount = hcloudRemediation.Spec.Strategy.RetryLimit
Expect(testEnv.Create(ctx, hcloudRemediation)).To(Succeed())
- Eventually(func() bool {
+ Eventually(func() error {
if err := testEnv.Get(ctx, hcloudRemediationkey, hcloudRemediation); err != nil {
- return false
+ return err
}
-
- testEnv.GetLogger().Info("status of hcloudRemediation", "status", hcloudRemediation.Status.Phase)
- return hcloudRemediation.Status.Phase == infrav1.PhaseWaiting
- }, timeout).Should(BeTrue())
+ if hcloudRemediation.Status.Phase != infrav1.PhaseWaiting {
+ return fmt.Errorf("hcloudRemediation.Status.Phase != infrav1.PhaseWaiting (phase is %s)", hcloudRemediation.Status.Phase)
+ }
+ return nil
+ }, timeout).ShouldNot(HaveOccurred())
})
It("should delete machine if retry limit reached and reboot timed out (hcloud)", func() {
diff --git a/controllers/hetznerbaremetalhost_controller.go b/controllers/hetznerbaremetalhost_controller.go
index 13ccfd8f4..74d2ca0d3 100644
--- a/controllers/hetznerbaremetalhost_controller.go
+++ b/controllers/hetznerbaremetalhost_controller.go
@@ -23,9 +23,11 @@ import (
"reflect"
"time"
+ "github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
@@ -47,17 +49,20 @@ import (
robotclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/robot"
sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
"github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/host"
+ "github.com/syself/cluster-api-provider-hetzner/pkg/utils"
)
// HetznerBareMetalHostReconciler reconciles a HetznerBareMetalHost object.
type HetznerBareMetalHostReconciler struct {
client.Client
- RateLimitWaitTime time.Duration
- APIReader client.Reader
- RobotClientFactory robotclient.Factory
- SSHClientFactory sshclient.Factory
- WatchFilterValue string
- PreProvisionCommand string
+ RateLimitWaitTime time.Duration
+ APIReader client.Reader
+ RobotClientFactory robotclient.Factory
+ SSHClientFactory sshclient.Factory
+ WatchFilterValue string
+ PreProvisionCommand string
+ SSHAfterInstallImage bool
+ ImageURLCommand string
}
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=hetznerbaremetalhosts,verbs=get;list;watch;create;update;patch;delete
@@ -87,6 +92,58 @@ func (r *HetznerBareMetalHostReconciler) Reconcile(ctx context.Context, req ctrl
return reconcile.Result{}, err
}
+ // ----------------------------------------------------------------
+ // Start: avoid conflict errors. Wait until local cache is up-to-date
+ // Won't be needed once this was implemented:
+ // https://github.com/kubernetes-sigs/controller-runtime/issues/3320
+ initialHost := bmHost.DeepCopy()
+ defer func() {
+ // We can potentially optimize this further by ensuring that the cache is up to date only in
+ // the cases where an outdated cache would lead to problems. Currently, we ensure that the
+ // cache is up to date in all cases, i.e. for all possible changes to the
+ // HetznerBareMetalHost object.
+ if cmp.Equal(initialHost, bmHost) {
+ // Nothing has changed. No need to wait.
+ return
+ }
+ startReadOwnWrite := time.Now()
+
+ // The object changed. Wait until the new version is in the local cache
+
+ // Get the latest version from the apiserver.
+ apiserverHost := &infrav1.HetznerBareMetalHost{}
+
+ // Use uncached APIReader
+ err := r.APIReader.Get(ctx, client.ObjectKeyFromObject(bmHost), apiserverHost)
+ if err != nil {
+ reterr = errors.Join(reterr,
+ fmt.Errorf("failed get HetznerBareMetalHost via uncached APIReader: %w", err))
+ return
+ }
+
+ apiserverRV := apiserverHost.ResourceVersion
+
+ err = wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, true, func(ctx context.Context) (done bool, err error) {
+ // new resource, read from local cache
+ latestFromLocalCache := &infrav1.HetznerBareMetalHost{}
+ getErr := r.Get(ctx, client.ObjectKeyFromObject(apiserverHost), latestFromLocalCache)
+ if apierrors.IsNotFound(getErr) {
+ // the object was deleted. All is fine.
+ return true, nil
+ }
+ if getErr != nil {
+ return false, getErr
+ }
+ return utils.IsLocalCacheUpToDate(latestFromLocalCache.ResourceVersion, apiserverRV), nil
+ })
+ if err != nil {
+ log.Error(err, "cache sync failed after BootState change")
+ }
+ log.Info("Wait for update being in local cache", "durationWaitForLocalCacheSync", time.Since(startReadOwnWrite).Round(time.Millisecond))
+ }()
+ // End: avoid conflict errors. Wait until local cache is up-to-date
+ // ----------------------------------------------------------------
+
initialProvisioningState := bmHost.Spec.Status.ProvisioningState
defer func() {
if initialProvisioningState != bmHost.Spec.Status.ProvisioningState {
@@ -202,6 +259,8 @@ func (r *HetznerBareMetalHostReconciler) Reconcile(ctx context.Context, req ctrl
RescueSSHSecret: rescueSSHSecret,
SecretManager: secretManager,
PreProvisionCommand: r.PreProvisionCommand,
+ ImageURLCommand: r.ImageURLCommand,
+ SSHAfterInstallImage: r.SSHAfterInstallImage,
})
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create scope: %w", err)
@@ -428,7 +487,7 @@ func (r *HetznerBareMetalHostReconciler) SetupWithManager(ctx context.Context, m
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.HetznerBareMetalHost{}).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
WithEventFilter(
predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
diff --git a/controllers/hetznerbaremetalhost_controller_test.go b/controllers/hetznerbaremetalhost_controller_test.go
index f9acabb9c..c01881b71 100644
--- a/controllers/hetznerbaremetalhost_controller_test.go
+++ b/controllers/hetznerbaremetalhost_controller_test.go
@@ -550,7 +550,6 @@ var _ = Describe("HetznerBareMetalHostReconciler", func() {
Spec: getDefaultHetznerBareMetalMachineSpec(),
}
bmMachine.Spec.SSHSpec.PortAfterInstallImage = 23
- bmMachine.Spec.SSHSpec.PortAfterCloudInit = 24
Expect(testEnv.Create(ctx, bmMachine)).To(Succeed())
})
@@ -853,8 +852,8 @@ NAME="nvme1n1" LABEL="" FSTYPE="" TYPE="disk" HCTL="" MODEL="SAMSUNG MZVLB512HAJ
Err: nil,
})
sshClient.On("GetHardwareDetailsNics").Return(sshclient.Output{
- StdOut: `name="eth0" model="Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15)" mac="a8:a1:59:94:19:42" ipv4="23.88.6.239/26" speedMbps="1000"
-name="eth0" model="Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15)" mac="a8:a1:59:94:19:42" ipv6="2a01:4f8:272:3e0f::2/64" speedMbps="1000"`,
+ StdOut: `name="eth0" model="Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15)" mac="a8:a1:59:94:19:42" ip="23.88.6.239/26" speedMbps="1000"
+name="eth0" model="Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15)" mac="a8:a1:59:94:19:42" ip="2a01:4f8:272:3e0f::2/64" speedMbps="1000"`,
StdErr: "",
Err: nil,
})
diff --git a/controllers/hetznerbaremetalmachine_controller.go b/controllers/hetznerbaremetalmachine_controller.go
index 35938853b..084ad06fa 100644
--- a/controllers/hetznerbaremetalmachine_controller.go
+++ b/controllers/hetznerbaremetalmachine_controller.go
@@ -144,9 +144,9 @@ func (r *HetznerBareMetalMachineReconciler) Reconcile(ctx context.Context, req r
conditions.SetSummary(hbmMachine)
- if err := machineScope.Close(ctx); err != nil && reterr == nil {
+ if err := machineScope.Close(ctx); err != nil {
res = reconcile.Result{}
- reterr = err
+ reterr = errors.Join(reterr, err)
}
}()
@@ -215,7 +215,7 @@ func (r *HetznerBareMetalMachineReconciler) SetupWithManager(ctx context.Context
err = ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.HetznerBareMetalMachine{}).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue)).
Watches(
&clusterv1.Machine{},
handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("HetznerBareMetalMachine"))),
@@ -235,7 +235,7 @@ func (r *HetznerBareMetalMachineReconciler) SetupWithManager(ctx context.Context
Watches(
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc),
- builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(log)),
+ builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log)),
).
Complete(r)
if err != nil {
diff --git a/controllers/hetznerbaremetalmachine_controller_test.go b/controllers/hetznerbaremetalmachine_controller_test.go
index 5fb0784c7..8dfc1afa9 100644
--- a/controllers/hetznerbaremetalmachine_controller_test.go
+++ b/controllers/hetznerbaremetalmachine_controller_test.go
@@ -791,7 +791,6 @@ var _ = Describe("HetznerBareMetalMachineReconciler", func() {
},
},
PortAfterInstallImage: 2222,
- PortAfterCloudInit: 2222,
},
},
},
@@ -997,8 +996,12 @@ var _ = Describe("HetznerBareMetalMachineReconciler", func() {
Eventually(func() bool {
Expect(testEnv.Get(ctx, client.ObjectKeyFromObject(waitingMachine),
waitingMachine)).To(Succeed())
- return waitingMachine.Status.Ready
- }, timeout).Should(BeTrue())
+ if waitingMachine.Status.Ready {
+ return true
+ }
+ testEnv.GetLogger().Info("Waiting for machine to be ready", "status", waitingMachine.Status)
+ return false
+ }, timeout, interval).Should(BeTrue())
})
})
})
diff --git a/controllers/hetznerbaremetalremediation_controller.go b/controllers/hetznerbaremetalremediation_controller.go
index cd61505da..cfd012cf8 100644
--- a/controllers/hetznerbaremetalremediation_controller.go
+++ b/controllers/hetznerbaremetalremediation_controller.go
@@ -18,6 +18,7 @@ package controllers
import (
"context"
+ "errors"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -139,9 +140,9 @@ func (r *HetznerBareMetalRemediationReconciler) Reconcile(ctx context.Context, r
patchOpts := []patch.Option{}
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
- if err := remediationScope.Close(ctx, patchOpts...); err != nil && reterr == nil {
+ if err := remediationScope.Close(ctx, patchOpts...); err != nil {
res = reconcile.Result{}
- reterr = err
+ reterr = errors.Join(reterr, err)
}
}()
@@ -170,6 +171,6 @@ func (r *HetznerBareMetalRemediationReconciler) SetupWithManager(ctx context.Con
return ctrl.NewControllerManagedBy(mgr).
For(&infrav1.HetznerBareMetalRemediation{}).
WithOptions(options).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
Complete(r)
}
diff --git a/controllers/hetznercluster_controller.go b/controllers/hetznercluster_controller.go
index 1e2f1c9a9..a66f971fb 100644
--- a/controllers/hetznercluster_controller.go
+++ b/controllers/hetznercluster_controller.go
@@ -37,6 +37,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
@@ -151,9 +152,9 @@ func (r *HetznerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
conditions.MarkTrue(hetznerCluster, infrav1.HCloudTokenAvailableCondition)
}
- if err := clusterScope.Close(ctx); err != nil && reterr == nil {
+ if err := clusterScope.Close(ctx); err != nil {
res = reconcile.Result{}
- reterr = err
+ reterr = errors.Join(reterr, err)
}
}()
@@ -733,7 +734,17 @@ func (r *HetznerClusterReconciler) newTargetClusterManager(ctx context.Context,
clusterName: clusterScope.Cluster.Name,
}
- if err := gr.SetupWithManager(ctx, clusterMgr, controller.Options{}); err != nil {
+ if err := gr.SetupWithManager(ctx, clusterMgr, controller.Options{
+ // SkipNameValidation. Avoid this error: failed to setup CSR controller: controller with
+ // name certificatesigningrequest already exists. Controller names must be unique to
+ // avoid multiple controllers reporting the same metric. This validation can be disabled
+ // via the SkipNameValidation option
+ //
+ // By default, controller names must be unique (to prevent duplicate Prometheus
+ // metrics). In our case the name is not unique, because it gets executed for every
+ // workload cluster.
+ SkipNameValidation: ptr.To(true),
+ }); err != nil {
return nil, fmt.Errorf("failed to setup CSR controller: %w", err)
}
}
@@ -752,8 +763,8 @@ func (r *HetznerClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr
err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.HetznerCluster{}).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)).
- WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceIsNotExternallyManaged(mgr.GetScheme(), log)).
WithEventFilter(IgnoreInsignificantHetznerClusterStatusUpdates(log)).
Owns(&corev1.Secret{}).
Watches(
diff --git a/controllers/hetznercluster_controller_test.go b/controllers/hetznercluster_controller_test.go
index 52d6eaac9..0359f0eb7 100644
--- a/controllers/hetznercluster_controller_test.go
+++ b/controllers/hetznercluster_controller_test.go
@@ -530,7 +530,7 @@ var _ = Describe("Hetzner ClusterReconciler", func() {
}, timeout, time.Second).Should(BeTrue())
})
- It("should take over an existing load balancer with correct name", func() {
+ It("should take over an existing load balancer with correct name (flaky)", func() {
By("creating load balancer manually")
opts := hcloud.LoadBalancerCreateOpts{
diff --git a/docs/README.md b/docs/README.md
index ef32fee9d..98fb2a0c8 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -35,3 +35,4 @@ This is the official documentation of Cluster API Provider Hetzner. Before start
- [Releasing](/docs/caph/04-developers/03-releasing.md)
- [Updating Kubernetes version](/docs/caph/04-developers/04-updating-kubernetes-version.md)
- [pre-provision-command](/docs/caph/04-developers/05-pre-provision-command.md)
+- [image-url-command](/docs/caph/04-developers/06-image-url-command.md)
diff --git a/docs/caph/02-topics/05-baremetal/03-creating-workload-cluster.md b/docs/caph/02-topics/05-baremetal/03-creating-workload-cluster.md
index 7d66271a3..e0811a7a8 100644
--- a/docs/caph/02-topics/05-baremetal/03-creating-workload-cluster.md
+++ b/docs/caph/02-topics/05-baremetal/03-creating-workload-cluster.md
@@ -78,7 +78,7 @@ Let's deploy the hetzner CCM helm chart.
helm repo add syself https://charts.syself.com
helm repo update syself
-$ helm upgrade --install ccm syself/ccm-hetzner --version 1.1.10 \
+$ helm upgrade --install ccm syself/ccm-hetzner --version 2.0.1 \
--namespace kube-system \
--kubeconfig workload-kubeconfig
Release "ccm" does not exist. Installing it now.
diff --git a/docs/caph/03-reference/06-hetzner-bare-metal-machine-template.md b/docs/caph/03-reference/06-hetzner-bare-metal-machine-template.md
index 3ccd4d6be..96ec90f16 100644
--- a/docs/caph/03-reference/06-hetzner-bare-metal-machine-template.md
+++ b/docs/caph/03-reference/06-hetzner-bare-metal-machine-template.md
@@ -78,8 +78,8 @@ Via MatchLabels you can specify a certain label (key and value) that identifies
| `template.spec.sshSpec.secretRef.key.name` | `string` | | yes | Name is the key in the secret's data where the SSH key's name is stored |
| `template.spec.sshSpec.secretRef.key.publicKey` | `string` | | yes | PublicKey is the key in the secret's data where the SSH key's public key is stored |
| `template.spec.sshSpec.secretRef.key.privateKey` | `string` | | yes | PrivateKey is the key in the secret's data where the SSH key's private key is stored |
-| `template.spec.sshSpec.portAfterInstallImage` | `int` | `22` | no | PortAfterInstallImage specifies the port that can be used to reach the server via SSH after install image completed successfully |
-| `template.spec.sshSpec.portAfterCloudInit` | `int` | `22` (install image port) | no | PortAfterCloudInit specifies the port that can be used to reach the server via SSH after cloud init completed successfully |
+| `template.spec.sshSpec.portAfterInstallImage` | `int` | `22` | no | PortAfterInstallImage specifies the port that can be used to reach the server via SSH after install image completed successfully. If `--baremetal-ssh-after-install-image=false` is set, then this value will never be used. |
+| `template.spec.sshSpec.portAfterCloudInit` | `int` | `22` (install image port) | no | PortAfterCloudInit specifies the port that can be used to reach the server via SSH after cloud init completed successfully. Deprecated. Since [PR Install Cloud-Init-Data via post-install.sh #1407](https://github.com/syself/cluster-api-provider-hetzner/pull/1407) this field is not functional. |
## installImage.image
diff --git a/docs/caph/04-developers/06-image-url-command.md b/docs/caph/04-developers/06-image-url-command.md
new file mode 100644
index 000000000..6c3728b40
--- /dev/null
+++ b/docs/caph/04-developers/06-image-url-command.md
@@ -0,0 +1,64 @@
+---
+title: image-url-command
+metatitle: Cluster API Provider Hetzner Custom Command to Install Node Image via imageURL
+sidebar: image-url-command
+description: Documentation on the CAPH image-url-command
+---
+
+The `--hcloud-image-url-command` and `--baremtal-image-url-command` for the caph controller can be
+used to execute a custom command to install the node image.
+
+This provides you a flexible way to create nodes.
+
+The script/binary will be copied into the rescue system and executed.
+
+You need to enable two things:
+
+* The caph binary must get argument. Example:
+ `--[hcloud|baremetal]-image-url-command=/shared/image-url-command.sh`
+* for hcloud: The hcloudmachine resource must have spec.imageURL set (usually via a
+ hcloudmachinetemplate)
+* for baremetal: The hetznerbaremetal resource must use `useCustomImageURLCommand: true`.
+
+The command will get the imageURL, bootstrap-data, machine-name of the corresponding
+machine and the root devices (seperated by spaces) as argument.
+
+Example:
+
+```bash
+/root/image-url-command oci://example.com/yourimage:v1 /root/bootstrap.data my-md-bm-kh57r-5z2v8-zdfc9 'sda sdb'
+```
+
+It is up to the command to download from that URL and provision the disk accordingly. This command
+must be accessible by the controller pod. You can use an initContainer to copy the command to a
+shared emptyDir.
+
+The env var OCI_REGISTRY_AUTH_TOKEN from the caph process will be set for the command, too.
+
+The command must end with the last line containing IMAGE_URL_DONE. Otherwise the execution is
+considered to have failed.
+
+The controller uses url.ParseRequestURI (Go function) to validate the imageURL.
+
+A Kubernetes event will be created in both (success, failure) cases containing the output (stdout
+and stderr) of the script. If the script takes longer than 7 minutes, the controller cancels the
+provisioning.
+
+We measured these durations for hcloud:
+
+| oldState | newState | avg(s) | min(s) | max(s) |
+|----------|----------|-------:|-------:|-------:|
+| | Initializing | 3.30 | 2.00 | 5.00 |
+| Initializing | EnablingRescue | 19.20 | 11.00 | 21.00 |
+| EnablingRescue | BootingToRescue | 14.20 | 9.00 | 23.00 |
+| BootingToRescue | RunningImageCommand | 38.20 | 37.00 | 42.00 |
+| RunningImageCommand | BootingToRealOS | 62.40 | 56.00 | 80.00 |
+| BootingToRealOS | OperatingSystemRunning | 1.80 | 1.00 | 3.00 |
+
+
+
+The duration of the state `RunningImageCommand` depends heavily on your script.
diff --git a/go.mod b/go.mod
index 243d3ba4b..54664f1fe 100644
--- a/go.mod
+++ b/go.mod
@@ -1,153 +1,157 @@
module github.com/syself/cluster-api-provider-hetzner
-go 1.23.7
+go 1.24.5
require (
github.com/blang/semver/v4 v4.0.0
github.com/bramvdbogaerde/go-scp v1.5.0
- github.com/go-logr/logr v1.4.2
+ github.com/go-logr/logr v1.4.3
github.com/go-logr/zapr v1.3.0
- github.com/guettli/check-conditions v0.0.9
- github.com/hetznercloud/hcloud-go/v2 v2.19.1
- github.com/onsi/ginkgo/v2 v2.23.0
- github.com/onsi/gomega v1.36.2
- github.com/prometheus/common v0.63.0
- github.com/spf13/pflag v1.0.6
- github.com/stoewer/go-strcase v1.3.0
+ github.com/google/go-cmp v0.7.0
+ github.com/guettli/check-conditions v0.0.20
+ github.com/hetznercloud/hcloud-go/v2 v2.22.0
+ github.com/mitchellh/copystructure v1.2.0
+ github.com/onsi/ginkgo/v2 v2.23.4
+ github.com/onsi/gomega v1.38.0
+ github.com/prometheus/common v0.65.0
+ github.com/spf13/pflag v1.0.7
+ github.com/stoewer/go-strcase v1.3.1
github.com/stretchr/testify v1.10.0
- github.com/syself/hrobot-go v0.2.6
+ github.com/syself/hrobot-go v0.2.7
go.uber.org/zap v1.27.0
- golang.org/x/crypto v0.36.0
- golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
- golang.org/x/mod v0.24.0
- k8s.io/api v0.30.3
- k8s.io/apimachinery v0.30.3
- k8s.io/apiserver v0.30.3
- k8s.io/client-go v0.30.3
+ golang.org/x/crypto v0.40.0
+ golang.org/x/exp v0.0.0-20250718183923-645b1fa84792
+ golang.org/x/mod v0.26.0
+ k8s.io/api v0.32.7
+ k8s.io/apimachinery v0.32.7
+ k8s.io/apiserver v0.32.7
+ k8s.io/client-go v0.32.7
k8s.io/klog/v2 v2.130.1
- k8s.io/kubectl v0.30.3
- k8s.io/utils v0.0.0-20241210054802-24370beab758
- sigs.k8s.io/cluster-api v1.8.10
- sigs.k8s.io/cluster-api/test v1.8.10
- sigs.k8s.io/controller-runtime v0.18.7
- sigs.k8s.io/kind v0.27.0
+ k8s.io/kubectl v0.32.7
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
+ sigs.k8s.io/cluster-api v1.10.4
+ sigs.k8s.io/cluster-api/test v1.10.4
+ sigs.k8s.io/controller-runtime v0.20.4
+ sigs.k8s.io/kind v0.29.0
)
require (
al.essio.dev/pkg/shellescape v1.5.1 // indirect
+ cel.dev/expr v0.18.0 // indirect
+ dario.cat/mergo v1.0.1 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
- github.com/Masterminds/semver/v3 v3.2.0 // indirect
- github.com/Masterminds/sprig/v3 v3.2.3 // indirect
+ github.com/Masterminds/semver/v3 v3.3.0 // indirect
+ github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
- github.com/adrg/xdg v0.5.0 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+ github.com/adrg/xdg v0.5.3 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/cloudflare/circl v1.6.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
- github.com/docker/docker v27.1.1+incompatible // indirect
+ github.com/docker/docker v28.0.2+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect
- github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/evanphx/json-patch v5.7.0+incompatible // indirect
- github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+ github.com/evanphx/json-patch/v5 v5.9.11 // indirect
+ github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.8.0 // indirect
+ github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.3 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
- github.com/gobuffalo/flect v1.0.2 // indirect
+ github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
+ github.com/gobuffalo/flect v1.0.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/cel-go v0.17.8 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-cmp v0.7.0 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/cel-go v0.22.0 // indirect
+ github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-github/v53 v53.2.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
- github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect
+ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
- github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/huandu/xstrings v1.3.3 // indirect
- github.com/imdario/mergo v0.3.13 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
+ github.com/huandu/xstrings v1.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/mitchellh/copystructure v1.2.0 // indirect
- github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
- github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang v1.20.5 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/client_golang v1.22.0 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
- github.com/sagikazarmark/locafero v0.4.0 // indirect
- github.com/sagikazarmark/slog-shim v0.1.0 // indirect
- github.com/shopspring/decimal v1.3.1 // indirect
+ github.com/rivo/uniseg v0.4.2 // indirect
+ github.com/sagikazarmark/locafero v0.7.0 // indirect
+ github.com/shopspring/decimal v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
- github.com/spf13/afero v1.11.0 // indirect
- github.com/spf13/cast v1.6.0 // indirect
- github.com/spf13/cobra v1.8.1 // indirect
- github.com/spf13/viper v1.19.0 // indirect
+ github.com/spf13/afero v1.12.0 // indirect
+ github.com/spf13/cast v1.7.1 // indirect
+ github.com/spf13/cobra v1.9.1 // indirect
+ github.com/spf13/viper v1.20.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/valyala/fastjson v1.6.4 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel v1.24.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/sdk v1.22.0 // indirect
- go.opentelemetry.io/otel/trace v1.24.0 // indirect
- go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.33.0 // indirect
+ go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/net v0.37.0 // indirect
- golang.org/x/oauth2 v0.25.0 // indirect
- golang.org/x/sync v0.12.0 // indirect
- golang.org/x/sys v0.31.0 // indirect
- golang.org/x/term v0.30.0 // indirect
- golang.org/x/text v0.23.0 // indirect
- golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.31.0 // indirect
- gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
- google.golang.org/grpc v1.62.2 // indirect
- google.golang.org/protobuf v1.36.5 // indirect
+ golang.org/x/net v0.42.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sync v0.16.0 // indirect
+ golang.org/x/sys v0.34.0 // indirect
+ golang.org/x/term v0.33.0 // indirect
+ golang.org/x/text v0.27.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
+ golang.org/x/tools v0.35.0 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
+ google.golang.org/grpc v1.68.1 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/ini.v1 v1.67.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apiextensions-apiserver v0.30.3 // indirect
- k8s.io/cluster-bootstrap v0.30.3 // indirect
- k8s.io/component-base v0.30.3 // indirect
- k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ k8s.io/apiextensions-apiserver v0.32.7 // indirect
+ k8s.io/cluster-bootstrap v0.32.3 // indirect
+ k8s.io/component-base v0.32.7 // indirect
+ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/go.sum b/go.sum
index 45b3c9a0f..3dffbacb9 100644
--- a/go.sum
+++ b/go.sum
@@ -1,25 +1,29 @@
al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho=
al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
+cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
-github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
-github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA=
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
-github.com/adrg/xdg v0.5.0 h1:dDaZvhMXatArP1NPHhnfaQUqWBLBsmx1h1HXQdMoFCY=
-github.com/adrg/xdg v0.5.0/go.mod h1:dDdY4M4DF9Rjy4kHPeNL+ilVF+p2lK8IdM9/rTSGcI4=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
+github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -29,25 +33,25 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
-github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
-github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.25 h1:/XexFhM8FFlFLTS/zKNEWgIZ8Gl5GaWrHsMarGj/PRQ=
-github.com/coredns/corefile-migration v1.0.25/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
+github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY=
+github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -55,55 +59,61 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
-github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8=
+github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0=
github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU=
-github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
-github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
+github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
+github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
+github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
-github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
+github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -116,30 +126,22 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/guettli/check-conditions v0.0.9 h1:RWIVVxIxWUNWe+KfIFBAEmI+iHGTRRMfZxCMFtV2Xq8=
-github.com/guettli/check-conditions v0.0.9/go.mod h1:6NifCTWJHcTBVv/0BvsF4ed3K/UOQsw7Pfp7kiRKNww=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hetznercloud/hcloud-go/v2 v2.19.1 h1:UU/7h3uc/rdgspM8xkQF7wokmwZXePWDXcLqrQRRzzY=
-github.com/hetznercloud/hcloud-go/v2 v2.19.1/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
-github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
-github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
-github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
+github.com/guettli/check-conditions v0.0.20 h1:veRmfngRayW5BZWNU5HkPgp4Y09sfHs3bLrqZvU3ulU=
+github.com/guettli/check-conditions v0.0.20/go.mod h1:6NifCTWJHcTBVv/0BvsF4ed3K/UOQsw7Pfp7kiRKNww=
+github.com/hetznercloud/hcloud-go/v2 v2.22.0 h1:RwcOkgB5y7kvi9Nxt40lHej8HjaS/P+9Yjfs4Glcds0=
+github.com/hetznercloud/hcloud-go/v2 v2.22.0/go.mod h1:t14Logj+iLXyS03DGwEyrN+y7/C9243CJt3IArTHbyM=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -148,8 +150,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -159,24 +161,24 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
-github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
-github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -186,60 +188,63 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ=
-github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
-github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
-github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
+github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY=
+github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
-github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
-github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
+github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
-github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
-github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
-github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
+github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
-github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
+github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
-github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
-github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
-github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
+github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
+github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY=
+github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs=
+github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -247,49 +252,51 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/syself/hrobot-go v0.2.6 h1:22mBsNZATYyGqL6VIplmz71i9aNWTchWU3F5SS+cLDQ=
-github.com/syself/hrobot-go v0.2.6/go.mod h1:Oy47yZs+fJKcSh38S3OiNJdY34MXb0pkk796UnpYBnc=
+github.com/syself/hrobot-go v0.2.7 h1:1TeFGifXnsAr0u2ZdbH88kazcqmuQHaKgM81p29XkQo=
+github.com/syself/hrobot-go v0.2.7/go.mod h1:Oy47yZs+fJKcSh38S3OiNJdY34MXb0pkk796UnpYBnc=
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk=
-go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM=
-go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA=
-go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU=
-go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4=
-go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
+go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0=
+go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U=
+go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM=
+go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0=
+go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ=
+go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -300,33 +307,28 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
+golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
+golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
+golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
-golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
-golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
-golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -335,99 +337,89 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
+golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
+golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
+golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
-golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
-gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/grpc v1.62.2 h1:iEIj1U5qjyBjzkM5nk3Fq+S1IbjbXSyqeULZ1Nfo4AA=
-google.golang.org/grpc v1.62.2/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
+gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
+google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
-gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
-k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
-k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
-k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U=
-k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4=
-k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
-k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g=
-k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg=
-k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k=
-k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
-k8s.io/cluster-bootstrap v0.30.3 h1:MgxyxMkpaC6mu0BKWJ8985XCOnKU+eH3Iy+biwtDXRk=
-k8s.io/cluster-bootstrap v0.30.3/go.mod h1:h8BoLDfdD7XEEIXy7Bx9FcMzxHwz29jsYYi34bM5DKU=
-k8s.io/component-base v0.30.3 h1:Ci0UqKWf4oiwy8hr1+E3dsnliKnkMLZMVbWzeorlk7s=
-k8s.io/component-base v0.30.3/go.mod h1:C1SshT3rGPCuNtBs14RmVD2xW0EhRSeLvBh7AGk1quA=
+k8s.io/api v0.32.7 h1:CBhHkoi3YJW8QQI6VL/Hu9f1HHVImmuIh513d4H4VfQ=
+k8s.io/api v0.32.7/go.mod h1:YEB46LZ/M0/9t0m+R2FxW5fkZAUR/eoS6sZQKS3mBYk=
+k8s.io/apiextensions-apiserver v0.32.7 h1:w7IzqA3SZG9KNm5YMtrrqY3ipPgt13rZevDaZSubARA=
+k8s.io/apiextensions-apiserver v0.32.7/go.mod h1:CelzsiBUTLZeJ+MxBEcuDEgu9Qr3LQkZqmydvA/W9UA=
+k8s.io/apimachinery v0.32.7 h1:1vTegNQIfM7dvZrMV5//6jJv2odKAnadv9Bg+doJmaA=
+k8s.io/apimachinery v0.32.7/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/apiserver v0.32.7 h1:BJADFQpbKM1LC5GTueefdnDjzu5PUXAcEgWZrs2gj18=
+k8s.io/apiserver v0.32.7/go.mod h1:a3O36FgT3dQ26oufk9/1VVmWcna/OLQjofirYiocfQI=
+k8s.io/client-go v0.32.7 h1:ZDhv3JTaQ/IejnNXRePBZdRecAEvxf8+pFdt/ruuWXc=
+k8s.io/client-go v0.32.7/go.mod h1:/he4Akuzee/lTiWmcsrpZfCQ2LPNLTC2qqumLVAw/Fw=
+k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s=
+k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344=
+k8s.io/component-base v0.32.7 h1:iXfcDveIsx0CyB0b8qo0/4pfgmhwshaO/u4ij1hZeAM=
+k8s.io/component-base v0.32.7/go.mod h1:Qfa6+z8IIyIdyqewerOlWaibCsxKbpBNd3ATNrPKe/A=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/kubectl v0.30.3 h1:YIBBvMdTW0xcDpmrOBzcpUVsn+zOgjMYIu7kAq+yqiI=
-k8s.io/kubectl v0.30.3/go.mod h1:IcR0I9RN2+zzTRUa1BzZCm4oM0NLOawE6RzlDvd1Fpo=
-k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
-k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 h1:Tc9rS7JJoZ9sl3OpL4842oIk6lH7gWBb0JOmJ0ute7M=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0/go.mod h1:1ewhL9l1gkPcU/IU/6rFYfikf+7Y5imWv7ARVbBOzNs=
-sigs.k8s.io/cluster-api v1.8.10 h1:iKzPWhzP/PNIKfTlPLICdF8G0G6Rh31xVqut435qoPI=
-sigs.k8s.io/cluster-api v1.8.10/go.mod h1:5MX/395c1wR69dkIwOvhjUwXASu19rU7RCCMeljx96c=
-sigs.k8s.io/cluster-api/test v1.8.10 h1:LEKds8U2nQP8KpHQm071aXp7C0nqxd9ean7PRWWk3L8=
-sigs.k8s.io/cluster-api/test v1.8.10/go.mod h1:jafQ8dAUENdNuFVldDqTAqXutWOa+MDkF+L0gMpsiBE=
-sigs.k8s.io/controller-runtime v0.18.7 h1:WDnx8LTRY8Fn1j/7B+S/R9MeDjWNAzpDBoaSvMSrQME=
-sigs.k8s.io/controller-runtime v0.18.7/go.mod h1:L9r3fUZhID7Q9eK9mseNskpaTg2n11f/tlb8odyzJ4Y=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA=
-sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kubectl v0.32.7 h1:0I9J+y4MMwn1w/U+4FZMxW55Yk8jzVBCFlC/qGZF7t0=
+k8s.io/kubectl v0.32.7/go.mod h1:0xBY/uZgeqib3ffk0XLCS1SQ6aVL3rLCv9i99IAhrMU=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/cluster-api v1.10.4 h1:5mdyWLGbbwOowWrjqM/J9N600QnxTohu5J1/1YR6g7c=
+sigs.k8s.io/cluster-api v1.10.4/go.mod h1:68GJs286ZChsncp+TxYNj/vhy2NWokiPtH4+SA0afs0=
+sigs.k8s.io/cluster-api/test v1.10.4 h1:1CJp7yjh2XazaPFtZzxSby9Gip2yjW0dNxyyHR7VjDk=
+sigs.k8s.io/cluster-api/test v1.10.4/go.mod h1:n2LsLQxc4RSLDjUXhgzquSTagZTJpUcY7uwtQtCRmaY=
+sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
+sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/kind v0.29.0 h1:3TpCsyh908IkXXpcSnsMjWdwdWjIl7o9IMZImZCWFnI=
+sigs.k8s.io/kind v0.29.0/go.mod h1:ldWQisw2NYyM6k64o/tkZng/1qQW7OlzcN5a8geJX3o=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/hack/golang-modules-update.sh b/hack/golang-modules-update.sh
index a3fb78a71..04594cb9d 100755
--- a/hack/golang-modules-update.sh
+++ b/hack/golang-modules-update.sh
@@ -22,8 +22,11 @@ cd "${REPO_ROOT}" || exit 1
DIRS="./ ./hack/tools"
for DIR in ${DIRS}; do
- cd "${REPO_ROOT}/${DIR}" && go mod download
- cd "${REPO_ROOT}/${DIR}" && go mod verify
- cd "${REPO_ROOT}/${DIR}" && go mod tidy
- cd "${REPO_ROOT}/${DIR}" && go mod vendor
+ (
+ cd "${REPO_ROOT}/${DIR}"
+ go mod download
+ go mod verify
+ go mod tidy
+ go mod vendor
+ )
done
diff --git a/hack/hcloud-image-url-command-states-markdown-from-logs.py b/hack/hcloud-image-url-command-states-markdown-from-logs.py
new file mode 100644
index 000000000..673aef1e7
--- /dev/null
+++ b/hack/hcloud-image-url-command-states-markdown-from-logs.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+
+# Copyright 2025 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Create markdown table from transitions:
+# k logs deployments/caph-controller-manager | python3 hack/hcloud-image-url-command-states-markdown-from-logs.py
+import sys, json
+from collections import defaultdict
+
+agg = defaultdict(lambda: [0, 0.0, float("inf"), float("-inf")]) # count,sum,min,max
+
+for line in sys.stdin:
+ try:
+ o = json.loads(line)
+ if not o.get("durationInState"):
+ continue
+ old, new, d = o.get("oldState"), o.get("newState"), o.get("durationInState")
+ if old is None or new is None or d is None:
+ continue
+ d = float(d)
+ k = (old, new)
+ agg[k][0] += 1
+ agg[k][1] += d
+ agg[k][2] = min(agg[k][2], d)
+ agg[k][3] = max(agg[k][3], d)
+ except Exception:
+ pass
+
+custom_state_order = [
+ "",
+ "empty",
+ "Initializing",
+ "EnablingRescue",
+ "BootingToRescue",
+ "RunningImageCommand",
+ "BootingToRealOS",
+]
+order_index = {s: i for i, s in enumerate(custom_state_order)}
+
+
+def _state_rank(s: str) -> int:
+ # states not in list are placed after the predefined ones, keeping alphabetical order among themselves
+ base = order_index.get(s, len(custom_state_order))
+ if base == len(custom_state_order):
+ # offset plus alphabetical tiebreaker via tuple sort later
+ return base
+ return base
+
+
+def _sort_key(item):
+ (old, new), _ = item
+ return (_state_rank(old), _state_rank(new), old, new)
+
+
+rows = sorted(agg.items(), key=_sort_key)
+
+if not rows:
+ print("No provisioning state transitions detected (no lines with durationInState).")
+ sys.exit(0)
+
+# Markdown table header
+print("| oldState | newState | avg(s) | min(s) | max(s) |")
+print("|----------|----------|-------:|-------:|-------:|")
+for (old, new), (cnt, sumd, mn, mx) in rows:
+ avg = sumd / cnt if cnt else 0.0
+ print(f"| {old} | {new} | {avg:.2f} | {mn:.2f} | {mx:.2f} |")
diff --git a/hack/output-for-watch.sh b/hack/output-for-watch.sh
index cf1f4b60e..2be00f62e 100755
--- a/hack/output-for-watch.sh
+++ b/hack/output-for-watch.sh
@@ -26,7 +26,8 @@ kubectl get clusters -A
print_heading machines:
-kubectl get machines -A
+kubectl -n org-testing get machines \
+ -o custom-columns='NAME:.metadata.name,NODENAME:.status.nodeRef.name,IP:.status.addresses[?(@.type=="ExternalIP")].address,PROVIDERID:.spec.providerID,PHASE:.status.phase,VERSION:.spec.version'
print_heading hcloudmachine:
@@ -42,7 +43,7 @@ kubectl get hetznerbaremetalhost -A
print_heading events:
-kubectl get events -A --sort-by=lastTimestamp | grep -vP 'LeaderElection' | tail -8
+kubectl get events -A --sort-by=lastTimestamp | grep -vP 'LeaderElection' | tail -6
print_heading caph:
@@ -54,7 +55,8 @@ regex='^I\d\d\d\d|\
.*failed to retrieve Spec.ProviderID|\
.*failed to patch Machine default
'
-capi_logs=$(kubectl logs -n capi-system deployments/capi-controller-manager --since 7m | grep -vP "$(echo "$regex" | tr -d '\n')" | tail -5)
+capi_ns=$(kubectl get deployments -A | grep capi-con | cut -d' ' -f1)
+capi_logs=$(kubectl logs -n "$capi_ns" deployments/capi-controller-manager --since 10m | grep -vP "$(echo "$regex" | tr -d '\n')" | tail -5)
if [ -n "$capi_logs" ]; then
print_heading capi
echo "$capi_logs"
@@ -62,7 +64,7 @@ fi
echo
-if [ $(kubectl get machine -l cluster.x-k8s.io/control-plane 2>/dev/null | wc -l) -eq 0 ]; then
+if [[ $(kubectl get machine -l cluster.x-k8s.io/control-plane 2>/dev/null | wc -l) -eq 0 ]]; then
echo "❌ no control-plane machine exists."
exit 1
fi
diff --git a/hack/tail-controller-logs.sh b/hack/tail-controller-logs.sh
index 04233a117..fda118cc3 100755
--- a/hack/tail-controller-logs.sh
+++ b/hack/tail-controller-logs.sh
@@ -14,13 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-pod=$(kubectl -n caph-system get pods | grep caph-controller-manager | cut -d' ' -f1)
+ns=$(kubectl get deployments.apps -A | grep caph-controller-manager | cut -d' ' -f1)
+pod=$(kubectl -n "$ns" get pods | grep caph-controller-manager | cut -d' ' -f1)
if [ -z "$pod" ]; then
echo "failed to find caph-controller-manager pod"
exit 1
fi
-kubectl -n caph-system logs "$pod" --tail 200 | \
- ./hack/filter-caph-controller-manager-logs.py - | \
- tail -n 20
+kubectl -n "$ns" logs "$pod" --tail 200 |
+ ./hack/filter-caph-controller-manager-logs.py - |
+ tail -n 10
diff --git a/hack/test-unit.sh b/hack/test-unit.sh
index f556ad045..9a18862be 100755
--- a/hack/test-unit.sh
+++ b/hack/test-unit.sh
@@ -36,5 +36,5 @@ hack/tools/bin/gotestsum \
--jsonfile=.reports/go-test-output.json \
--junitfile=.coverage/junit.xml \
--format testname -- \
- -covermode=atomic -coverprofile=.coverage/cover.out -p=4 -timeout 5m \
+ -covermode=atomic -coverprofile=.coverage/cover.out -p=1 -timeout 5m \
./controllers/... ./pkg/... ./api/...
diff --git a/hack/update-operator-dev-deployment.sh b/hack/update-operator-dev-deployment.sh
index 0e4945947..8f82da6ba 100755
--- a/hack/update-operator-dev-deployment.sh
+++ b/hack/update-operator-dev-deployment.sh
@@ -25,6 +25,12 @@
trap 'echo "Warning: A command has failed. Exiting the script. Line was ($0:$LINENO): $(sed -n "${LINENO}p" "$0")"; exit 3' ERR
set -Eeuo pipefail
+if [[ $(kubectl config current-context) == *oidc@* ]]; then
+ echo "found oidc@ in the current kubectl context. It is likely that you are connected"
+ echo "to the wrong cluster"
+ exit 1
+fi
+
image_path="ghcr.io/syself"
while [[ "$#" -gt 0 ]]; do
@@ -52,6 +58,13 @@ if ! kubectl cluster-info >/dev/null; then
exit 1
fi
+current_context=$(kubectl config current-context)
+if ! echo "$current_context" | grep -P '.*-admin@.*-mgt-cluster'; then
+ echo "The script refuses to update because the current context is: $current_context"
+ echo "Expecting something like foo-mgt-cluster-admin@foo-mgt-cluster with 'foo' being a short version of your name"
+ exit 1
+fi
+
branch=$(git branch --show-current)
if [ "$branch" == "" ]; then
echo "failed to get branch name"
@@ -63,27 +76,39 @@ tag="$(echo -n "$tag" | tr -c 'a-zA-Z0-9_.-' '-')"
image="$image_path/caph-staging:$tag"
-echo "Building image: $image"
+# run in background
+{
+ make generate-manifests
+ kustomize build config/crd | kubectl apply -f -
+} &
-docker build -f images/caph/Dockerfile -t "$image" .
+# run in background2
+{
+ docker build -f images/caph/Dockerfile -t "$image" .
+ docker push "$image"
+} &
-docker push "$image"
+wait
-# Note: Up to now changes in the CRD are not supported by this script.
+ns=$(kubectl get deployments.apps -A | { grep caph-controller || true; } | cut -d' ' -f1)
+if [[ -z $ns ]]; then
+ echo "failed to get namespace for caph-controller"
+ exit 1
+fi
-kubectl scale --replicas=1 -n mgt-system deployment/caph-controller-manager
+kubectl scale --replicas=1 -n "$ns" deployment/caph-controller-manager
-kubectl set image -n mgt-system deployment/caph-controller-manager manager="$image"
+kubectl set image -n "$ns" deployment/caph-controller-manager manager="$image"
-kubectl patch deployment -n mgt-system -p '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Always"}]' --type='json' caph-controller-manager
+kubectl patch deployment -n "$ns" -p '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Always"}]' --type='json' caph-controller-manager
-kubectl rollout restart -n mgt-system deployment caph-controller-manager
+kubectl rollout restart -n "$ns" deployment caph-controller-manager
trap "echo 'Interrupted! Exiting...'; exit 1" SIGINT
-while ! kubectl rollout status deployment --timeout=3s -n mgt-system caph-controller-manager; do
+while ! kubectl rollout status deployment --timeout=3s -n "$ns" caph-controller-manager; do
echo "Rollout failed"
- kubectl events -n mgt-system | grep caph-controller-manager | tail -n 5
+ kubectl events -n "$ns" | grep caph-controller-manager | tail -n 5
echo
echo
done
diff --git a/hack/upgrade-builder-image.sh b/hack/upgrade-builder-image.sh
index a349990c7..3b0bc973f 100755
--- a/hack/upgrade-builder-image.sh
+++ b/hack/upgrade-builder-image.sh
@@ -15,6 +15,7 @@
# limitations under the License.
# This script is executed in the Update-Bot container.
+# Call it via `make builder-image-push`.
# It checks if the Dockerfile for the build container has changed.
# If so, it uses the version of the main branch as the basis for creating a new image tag.
# The script also checks if the image tag for the build image exists in the main branch.
diff --git a/hack/verify-generated-files.sh b/hack/verify-generated-files.sh
new file mode 100755
index 000000000..a4c57f513
--- /dev/null
+++ b/hack/verify-generated-files.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+# Copyright 2025 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bash Strict Mode: https://github.com/guettli/bash-strict-mode
+trap 'echo -e "\n🤷 🚨 🔥 Warning: A command has failed. Exiting the script. Line was ($0:$LINENO): $(sed -n "${LINENO}p" "$0" 2>/dev/null || true) 🔥 🚨 🤷 "; exit 3' ERR
+set -Eeuo pipefail
+
+if ! git diff --quiet || ! git diff --cached --quiet || [[ -n "$(git ls-files --others --exclude-standard)" ]]; then
+ echo
+ echo "Pre Start of verify-generated-faile.sh"
+ echo "Error: Git repository is not clean. Please commit, stash, or remove your changes and untracked files before proceeding."
+ git status
+ exit 1
+fi
+(
+ cd test/e2e
+ make e2e-cilium-templates
+)
+
+(
+ cd test/e2e
+ make e2e-ccm-templates
+)
+
+make generate
+
+if ! git diff --quiet || ! git diff --cached --quiet || [[ -n "$(git ls-files --others --exclude-standard)" ]]; then
+ echo "After generated files got re-generated:"
+ echo "Error: Git repository is not clean. Please commit, stash, or remove your changes and untracked files before proceeding."
+ git status
+ echo
+ echo "-------------------------"
+ git diff
+ echo
+ echo "git hash: $(git rev-parse HEAD)"
+ exit 1
+fi
+
+echo "OK: No changes in git repo after re-creating generated files".
diff --git a/images/builder/Dockerfile b/images/builder/Dockerfile
index 8267b8d63..ba3b6f091 100644
--- a/images/builder/Dockerfile
+++ b/images/builder/Dockerfile
@@ -30,7 +30,7 @@ RUN apk add --no-cache curl && \
# Install Golang CI Lint
FROM docker.io/library/alpine:3.21.3@sha256:a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c AS golangci
# update: datasource=github-tags depName=golangci/golangci-lint versioning=semver
-ENV GOLANGCI_VERSION="v1.64.7"
+ENV GOLANGCI_VERSION="v1.64.8"
WORKDIR /
# hadolint ignore=DL3018,DL4006
RUN apk add --no-cache curl && \
@@ -45,7 +45,7 @@ FROM docker.io/aquasec/trivy:0.60.0@sha256:91c3a842834563a6860dbaec5af7c1949df5c
############################
# Caph Build Image Base #
############################
-FROM docker.io/library/golang:1.23.7-bullseye@sha256:c4f892cd1906e6bf8a0e181f48babf76331c6f5dc786b709ffc9f591cb7edece
+FROM docker.io/library/golang:1.24.5-bullseye@sha256:62ba6b19de03e891f7fa1001326bd48411f2626ff35e7ba5b9d890711ce581d9
# update: datasource=repology depName=debian_11/skopeo versioning=loose
ENV SKOPEO_VERSION="1.2.2+dfsg1-1+b6"
@@ -67,6 +67,10 @@ RUN apt-get update && \
yamllint==${YAMLLINT_VERSION} \
yamlfixer-opt-nc==${YAMLFIXER_VERSION}
+
+# Install controller-gen (current version)
+RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.18.0
+
COPY --from=lychee /usr/bin/lychee /usr/bin/lychee
COPY --from=golangci /bin/golangci-lint /usr/local/bin
COPY --from=hadolint /bin/hadolint /usr/bin/hadolint
@@ -74,6 +78,12 @@ COPY --from=trivy /usr/local/bin/trivy /usr/bin/trivy
ENV GOCACHE=/go/cache
+## Install Helm
+# update: datasource=github-tags depName=helm/helm versioning=semver
+ENV HELM_VERSION="v3.18.6"
+RUN curl -sSL https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm
+RUN chmod a+rx /usr/local/bin/helm
+
COPY build.sh /
RUN chmod +x /build.sh
ENTRYPOINT ["/build.sh"]
diff --git a/images/caph/Dockerfile b/images/caph/Dockerfile
index 5fc628589..2df8b641f 100644
--- a/images/caph/Dockerfile
+++ b/images/caph/Dockerfile
@@ -13,7 +13,7 @@
# limitations under the License.
# Build the manager binary
-FROM --platform=${BUILDPLATFORM} docker.io/library/golang:1.23.7-bullseye@sha256:15c182db16ffc2d7abc4db2b09f468129b9adec2f7c2ddaa0c909d8cb06f13b5 \
+FROM --platform=${BUILDPLATFORM} docker.io/library/golang:1.24.5-bullseye@sha256:62ba6b19de03e891f7fa1001326bd48411f2626ff35e7ba5b9d890711ce581d9 \
AS build
ARG TARGETOS TARGETARCH
diff --git a/main.go b/main.go
index 38da2f8c0..f70ec14e9 100644
--- a/main.go
+++ b/main.go
@@ -58,7 +58,7 @@ var (
// We do not want filenames to start with a dot or a number.
// Only lowercase letters are allowed.
- preProvisionCommandRegex = regexp.MustCompile(`^[a-z][a-z0-9_.-]+[a-z0-9]$`)
+ commandRegex = regexp.MustCompile(`^[a-z][a-z0-9_.-]+[a-z0-9]$`)
)
func init() {
@@ -85,7 +85,10 @@ var (
syncPeriod time.Duration
rateLimitWaitTime time.Duration
preProvisionCommand string
+ hcloudImageURLCommand string
+ baremetalImageURLCommand string
skipWebhooks bool
+ sshAfterInstallImage bool
)
func main() {
@@ -106,7 +109,11 @@ func main() {
fs.DurationVar(&rateLimitWaitTime, "rate-limit", 5*time.Minute, "The rate limiting for HCloud controller (e.g. 5m)")
fs.BoolVar(&hcloudclient.DebugAPICalls, "debug-hcloud-api-calls", false, "Debug all calls to the hcloud API.")
fs.StringVar(&preProvisionCommand, "pre-provision-command", "", "Command to run (in rescue-system) before installing the image on bare metal servers. You can use that to check if the machine is healthy before installing the image. If the exit value is non-zero, the machine is considered unhealthy. This command must be accessible by the controller pod. You can use an initContainer to copy the command to a shared emptyDir.")
+ fs.StringVar(&hcloudImageURLCommand, "hcloud-image-url-command", "", "Command to run (in rescue-system) to provision an hcloud machine. Docs: https://syself.com/docs/caph/developers/image-url-command")
+ fs.StringVar(&baremetalImageURLCommand, "baremetal-image-url-command", "", "Command to run (in rescue-system) to provision an baremetal machine. Docs: https://syself.com/docs/caph/developers/image-url-command")
fs.BoolVar(&skipWebhooks, "skip-webhooks", false, "Skip setting up of webhooks. Together with --leader-elect=false, you can use `go run main.go` to run CAPH in a cluster connected via KUBECONFIG. You should scale down the caph deployment to 0 before doing that. This is only for testing!")
+ fs.BoolVar(&sshAfterInstallImage, "baremetal-ssh-after-install-image", true, "Connect to the baremetal machine after install-image and ensure it is provisioned. Current default is true, but we might change that to false. Background: Users might not want the controller to be able to ssh onto the servers")
+
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
@@ -115,8 +122,8 @@ func main() {
// If preProvisionCommand is set, check if the file exists and validate the basename.
if preProvisionCommand != "" {
baseName := filepath.Base(preProvisionCommand)
- if !preProvisionCommandRegex.MatchString(baseName) {
- msg := fmt.Sprintf("basename of pre-provision-command (%s) must match the regex %s", baseName, preProvisionCommandRegex.String())
+ if !commandRegex.MatchString(baseName) {
+ msg := fmt.Sprintf("basename (%s) must match the regex %s", baseName, commandRegex.String())
setupLog.Error(errors.New(msg), "")
os.Exit(1)
}
@@ -128,6 +135,38 @@ func main() {
}
}
+ // If hcloudImageURLCommand is set, check if the file exists and validate the basename.
+ if hcloudImageURLCommand != "" {
+ baseName := filepath.Base(hcloudImageURLCommand)
+ if !commandRegex.MatchString(baseName) {
+ msg := fmt.Sprintf("basename (%s) must match the regex %s", baseName, commandRegex.String())
+ setupLog.Error(errors.New(msg), "")
+ os.Exit(1)
+ }
+
+ _, err := os.Stat(hcloudImageURLCommand)
+ if err != nil {
+ setupLog.Error(err, "hcloud-image-url-command not found")
+ os.Exit(1)
+ }
+ }
+
+ // If baremetalImageURLCommand is set, check if the file exists and validate the basename.
+ if baremetalImageURLCommand != "" {
+ baseName := filepath.Base(baremetalImageURLCommand)
+ if !commandRegex.MatchString(baseName) {
+ msg := fmt.Sprintf("basename (%s) must match the regex %s", baseName, commandRegex.String())
+ setupLog.Error(errors.New(msg), "")
+ os.Exit(1)
+ }
+
+ _, err := os.Stat(baremetalImageURLCommand)
+ if err != nil {
+ setupLog.Error(err, "baremetal-image-url-command not found")
+ os.Exit(1)
+ }
+ }
+
var watchNamespaces map[string]cache.Config
if watchNamespace != "" {
watchNamespaces = map[string]cache.Config{
@@ -192,7 +231,9 @@ func main() {
APIReader: mgr.GetAPIReader(),
RateLimitWaitTime: rateLimitWaitTime,
HCloudClientFactory: hcloudClientFactory,
+ SSHClientFactory: sshclient.NewFactory(),
WatchFilterValue: watchFilterValue,
+ ImageURLCommand: hcloudImageURLCommand,
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: hcloudMachineConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "HCloudMachine")
os.Exit(1)
@@ -210,13 +251,15 @@ func main() {
}
if err = (&controllers.HetznerBareMetalHostReconciler{
- Client: mgr.GetClient(),
- RobotClientFactory: robotclient.NewFactory(),
- SSHClientFactory: sshclient.NewFactory(),
- APIReader: mgr.GetAPIReader(),
- RateLimitWaitTime: rateLimitWaitTime,
- WatchFilterValue: watchFilterValue,
- PreProvisionCommand: preProvisionCommand,
+ Client: mgr.GetClient(),
+ RobotClientFactory: robotclient.NewFactory(),
+ SSHClientFactory: sshclient.NewFactory(),
+ APIReader: mgr.GetAPIReader(),
+ RateLimitWaitTime: rateLimitWaitTime,
+ WatchFilterValue: watchFilterValue,
+ PreProvisionCommand: preProvisionCommand,
+ ImageURLCommand: baremetalImageURLCommand,
+ SSHAfterInstallImage: sshAfterInstallImage,
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: hetznerBareMetalHostConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "HetznerBareMetalHost")
os.Exit(1)
@@ -292,11 +335,11 @@ func setUpWebhookWithManager(mgr ctrl.Manager) {
setupLog.Error(err, "unable to create webhook", "webhook", "HCloudMachine")
os.Exit(1)
}
- if err := (&infrastructurev1beta1.HCloudMachineTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
+ if err := (&infrastructurev1beta1.HCloudMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "HCloudMachineTemplate")
os.Exit(1)
}
- if err := (&infrastructurev1beta1.HetznerBareMetalHostWebhook{}).SetupWebhookWithManager(mgr); err != nil {
+ if err := (&infrastructurev1beta1.HetznerBareMetalHost{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "HetznerBareMetalHost")
os.Exit(1)
}
@@ -304,7 +347,7 @@ func setUpWebhookWithManager(mgr ctrl.Manager) {
setupLog.Error(err, "unable to create webhook", "webhook", "HetznerBareMetalMachine")
os.Exit(1)
}
- if err := (&infrastructurev1beta1.HetznerBareMetalMachineTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
+ if err := (&infrastructurev1beta1.HetznerBareMetalMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "HetznerBareMetalMachineTemplate")
os.Exit(1)
}
diff --git a/metadata.yaml b/metadata.yaml
index bb5893f39..467f08d28 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -4,6 +4,7 @@
#
# update this file only when a new major or minor version is released
apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
+kind: Metadata
releaseSeries:
- major: 1
minor: 0
diff --git a/pkg/scope/baremetalhost.go b/pkg/scope/baremetalhost.go
index 7aecc9469..e8c7df003 100644
--- a/pkg/scope/baremetalhost.go
+++ b/pkg/scope/baremetalhost.go
@@ -48,6 +48,8 @@ type BareMetalHostScopeParams struct {
RescueSSHSecret *corev1.Secret
SecretManager *secretutil.SecretManager
PreProvisionCommand string
+ ImageURLCommand string
+ SSHAfterInstallImage bool
}
// NewBareMetalHostScope creates a new Scope from the supplied parameters.
@@ -93,23 +95,34 @@ func NewBareMetalHostScope(params BareMetalHostScopeParams) (*BareMetalHostScope
RescueSSHSecret: params.RescueSSHSecret,
SecretManager: params.SecretManager,
PreProvisionCommand: params.PreProvisionCommand,
+ SSHAfterInstallImage: params.SSHAfterInstallImage,
+ WorkloadClusterClientFactory: &realWorkloadClusterClientFactory{
+ logger: params.Logger,
+ client: params.Client,
+ cluster: params.Cluster,
+ hetznerCluster: params.HetznerCluster,
+ },
+ ImageURLCommand: params.ImageURLCommand,
}, nil
}
// BareMetalHostScope defines the basic context for an actuator to operate upon.
type BareMetalHostScope struct {
logr.Logger
- Client client.Client
- SecretManager *secretutil.SecretManager
- RobotClient robotclient.Client
- SSHClientFactory sshclient.Factory
- HetznerBareMetalHost *infrav1.HetznerBareMetalHost
- HetznerBareMetalMachine *infrav1.HetznerBareMetalMachine
- HetznerCluster *infrav1.HetznerCluster
- Cluster *clusterv1.Cluster
- OSSSHSecret *corev1.Secret
- RescueSSHSecret *corev1.Secret
- PreProvisionCommand string
+ Client client.Client
+ SecretManager *secretutil.SecretManager
+ RobotClient robotclient.Client
+ SSHClientFactory sshclient.Factory
+ HetznerBareMetalHost *infrav1.HetznerBareMetalHost
+ HetznerBareMetalMachine *infrav1.HetznerBareMetalMachine
+ HetznerCluster *infrav1.HetznerCluster
+ Cluster *clusterv1.Cluster
+ OSSSHSecret *corev1.Secret
+ RescueSSHSecret *corev1.Secret
+ PreProvisionCommand string
+ SSHAfterInstallImage bool
+ WorkloadClusterClientFactory WorkloadClusterClientFactory
+ ImageURLCommand string
}
// Name returns the HetznerCluster name.
diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go
index 42c22d54f..9c73dd40d 100644
--- a/pkg/scope/cluster.go
+++ b/pkg/scope/cluster.go
@@ -29,23 +29,22 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
- "sigs.k8s.io/cluster-api/util/secret"
"sigs.k8s.io/controller-runtime/pkg/client"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
- secretutil "github.com/syself/cluster-api-provider-hetzner/pkg/secrets"
hcloudclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client"
)
// ClusterScopeParams defines the input parameters used to create a new scope.
type ClusterScopeParams struct {
- Client client.Client
- APIReader client.Reader
- Logger logr.Logger
- HetznerSecret *corev1.Secret
- HCloudClient hcloudclient.Client
- Cluster *clusterv1.Cluster
- HetznerCluster *infrav1.HetznerCluster
+ Client client.Client
+ APIReader client.Reader
+ Logger logr.Logger
+ HetznerSecret *corev1.Secret
+ HCloudClient hcloudclient.Client
+ Cluster *clusterv1.Cluster
+ HetznerCluster *infrav1.HetznerCluster
+ ImageURLCommand string
}
// NewClusterScope creates a new Scope from the supplied parameters.
@@ -148,40 +147,7 @@ func (s *ClusterScope) ControlPlaneAPIEndpointPort() int32 {
// ClientConfig return a kubernetes client config for the cluster context.
func (s *ClusterScope) ClientConfig(ctx context.Context) (clientcmd.ClientConfig, error) {
- cluster := client.ObjectKey{
- Name: fmt.Sprintf("%s-%s", s.Cluster.Name, secret.Kubeconfig),
- Namespace: s.Cluster.Namespace,
- }
-
- secretManager := secretutil.NewSecretManager(s.Logger, s.Client, s.APIReader)
- kubeconfigSecret, err := secretManager.AcquireSecret(ctx, cluster, s.HetznerCluster, false, false)
- if err != nil {
- return nil, fmt.Errorf("failed to acquire secret: %w", err)
- }
- kubeconfigBytes, ok := kubeconfigSecret.Data[secret.KubeconfigDataName]
- if !ok {
- return nil, fmt.Errorf("missing key %q in secret data", secret.KubeconfigDataName)
- }
- return clientcmd.NewClientConfigFromBytes(kubeconfigBytes)
-}
-
-// ClientConfigWithAPIEndpoint returns a client config.
-func (s *ClusterScope) ClientConfigWithAPIEndpoint(ctx context.Context, endpoint clusterv1.APIEndpoint) (clientcmd.ClientConfig, error) {
- c, err := s.ClientConfig(ctx)
- if err != nil {
- return nil, err
- }
-
- raw, err := c.RawConfig()
- if err != nil {
- return nil, fmt.Errorf("error retrieving rawConfig from clientConfig: %w", err)
- }
- // update cluster endpint in config
- for key := range raw.Clusters {
- raw.Clusters[key].Server = fmt.Sprintf("https://%s:%d", endpoint.Host, endpoint.Port)
- }
-
- return clientcmd.NewDefaultClientConfig(raw, &clientcmd.ConfigOverrides{}), nil
+ return workloadClientConfigFromKubeconfigSecret(ctx, s.Logger, s.Client, s.APIReader, s.Cluster, s.HetznerCluster)
}
// ListMachines returns HCloudMachines.
diff --git a/pkg/scope/machine.go b/pkg/scope/machine.go
index 3b7e6b58a..49a003bff 100644
--- a/pkg/scope/machine.go
+++ b/pkg/scope/machine.go
@@ -28,20 +28,23 @@ import (
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- capierrors "sigs.k8s.io/cluster-api/errors"
+ capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck // we will handle that, when we update to capi v1.11
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
secretutil "github.com/syself/cluster-api-provider-hetzner/pkg/secrets"
+ sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
)
// MachineScopeParams defines the input parameters used to create a new Scope.
type MachineScopeParams struct {
ClusterScopeParams
- Machine *clusterv1.Machine
- HCloudMachine *infrav1.HCloudMachine
+ Machine *clusterv1.Machine
+ HCloudMachine *infrav1.HCloudMachine
+ SSHClientFactory sshclient.Factory
+ ImageURLCommand string
}
const maxShutDownTime = 2 * time.Minute
@@ -80,17 +83,21 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) {
}
return &MachineScope{
- ClusterScope: *cs,
- Machine: params.Machine,
- HCloudMachine: params.HCloudMachine,
+ ClusterScope: *cs,
+ Machine: params.Machine,
+ HCloudMachine: params.HCloudMachine,
+ ImageURLCommand: params.ImageURLCommand,
+ SSHClientFactory: params.SSHClientFactory,
}, nil
}
// MachineScope defines the basic context for an actuator to operate upon.
type MachineScope struct {
ClusterScope
- Machine *clusterv1.Machine
- HCloudMachine *infrav1.HCloudMachine
+ Machine *clusterv1.Machine
+ HCloudMachine *infrav1.HCloudMachine
+ ImageURLCommand string
+ SSHClientFactory sshclient.Factory
}
// Close closes the current scope persisting the cluster configuration and status.
@@ -122,6 +129,7 @@ func (m *MachineScope) PatchObject(ctx context.Context) error {
// SetError sets the ErrorMessage and ErrorReason fields on the machine and logs
// the message. It assumes the reason is invalid configuration, since that is
// currently the only relevant MachineStatusError choice.
+// CAPI will delete the machine and create a new one.
func (m *MachineScope) SetError(message string, reason capierrors.MachineStatusError) {
m.HCloudMachine.Status.FailureMessage = &message
m.HCloudMachine.Status.FailureReason = &reason
diff --git a/pkg/scope/workloadclusterclient.go b/pkg/scope/workloadclusterclient.go
new file mode 100644
index 000000000..1dfe985ee
--- /dev/null
+++ b/pkg/scope/workloadclusterclient.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package scope defines cluster and machine scope as well as a repository for the Hetzner API.
+package scope
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-logr/logr"
+ "k8s.io/client-go/tools/clientcmd"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/secret"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
+ secretutil "github.com/syself/cluster-api-provider-hetzner/pkg/secrets"
+)
+
+// workloadClientConfigFromKubeconfigSecret creates a kubernetes client config from kubeconfig secret.
+func workloadClientConfigFromKubeconfigSecret(ctx context.Context, logger logr.Logger, cl client.Client, apiReader client.Reader, cluster *clusterv1.Cluster, hetznerCluster *infrav1.HetznerCluster) (clientcmd.ClientConfig, error) {
+ secretKey := client.ObjectKey{
+ Name: fmt.Sprintf("%s-%s", cluster.Name, secret.Kubeconfig),
+ Namespace: cluster.Namespace,
+ }
+
+ secretManager := secretutil.NewSecretManager(logger, cl, apiReader)
+ kubeconfigSecret, err := secretManager.AcquireSecret(ctx, secretKey, hetznerCluster, false, false)
+ if err != nil {
+ return nil, fmt.Errorf("failed to acquire secret: %w", err)
+ }
+ kubeconfigBytes, ok := kubeconfigSecret.Data[secret.KubeconfigDataName]
+ if !ok {
+ return nil, fmt.Errorf("missing key %q in secret data (WorkloadClientConfigFromKubeconfigSecret)", secret.KubeconfigDataName)
+ }
+ return clientcmd.NewClientConfigFromBytes(kubeconfigBytes)
+}
+
+// WorkloadClusterClientFactory is an interface to get a new controller-runtime Client to access a
+// workload-cluster.
+type WorkloadClusterClientFactory interface {
+ // NewWorkloadClient returns a new client connected to the workload-cluster
+ NewWorkloadClient(ctx context.Context) (client.Client, error)
+}
+
+type realWorkloadClusterClientFactory struct {
+ logger logr.Logger
+ client client.Client
+ cluster *clusterv1.Cluster
+ hetznerCluster *infrav1.HetznerCluster
+}
+
+func (f *realWorkloadClusterClientFactory) NewWorkloadClient(ctx context.Context) (client.Client, error) {
+ wlConfig, err := workloadClientConfigFromKubeconfigSecret(ctx, f.logger,
+ f.client, f.client, f.cluster, f.hetznerCluster)
+ if err != nil {
+ return nil, fmt.Errorf("actionProvisioned (Reboot via Annotation),WorkloadClientConfigFromKubeconfigSecret failed: %w",
+ err)
+ }
+
+ // getting client
+ restConfig, err := wlConfig.ClientConfig()
+ if err != nil {
+ return nil, fmt.Errorf("actionProvisioned (Reboot via Annotation), failed to get rest config: %w", err)
+ }
+
+ wlClient, err := client.New(restConfig, client.Options{})
+ if err != nil {
+ return nil, fmt.Errorf("client.New failed: %w", err)
+ }
+ return wlClient, nil
+}
diff --git a/pkg/services/baremetal/baremetal/baremetal.go b/pkg/services/baremetal/baremetal/baremetal.go
index 0117fdc78..50a8f9275 100644
--- a/pkg/services/baremetal/baremetal/baremetal.go
+++ b/pkg/services/baremetal/baremetal/baremetal.go
@@ -39,7 +39,7 @@ import (
"k8s.io/apimachinery/pkg/selection"
"k8s.io/utils/ptr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- capierrors "sigs.k8s.io/cluster-api/errors"
+ capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck // we will handle that, when we update to capi v1.11
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/record"
@@ -833,6 +833,9 @@ func nodeAddresses(host *infrav1.HetznerBareMetalHost, bareMetalMachineName stri
addrs := make([]clusterv1.MachineAddress, 0, len(host.Spec.Status.HardwareDetails.NIC)+2)
for _, nic := range host.Spec.Status.HardwareDetails.NIC {
+ if nic.IP == "" {
+ continue
+ }
address := clusterv1.MachineAddress{
Type: clusterv1.MachineInternalIP,
Address: nic.IP,
diff --git a/pkg/services/baremetal/client/.mockery.yaml b/pkg/services/baremetal/client/.mockery.yaml
deleted file mode 100644
index 3b8149814..000000000
--- a/pkg/services/baremetal/client/.mockery.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# This config was choosen, so that the output matches to old structure (pre config file .mockery.yaml).
-# If you are here to copy this config to a new project, then it might
-# make sense to choose a structure which needs less config by using
-# the default values of Mockery.
-all: True
-filename: "{{.InterfaceName}}.go"
-mockname: "{{.InterfaceName}}"
-outpkg: mocks
-packages:
- github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh:
- config:
- dir: mocks/ssh
-
- github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/robot:
- config:
- dir: mocks/robot
diff --git a/pkg/services/baremetal/client/mocks/robot/Client.go b/pkg/services/baremetal/client/mocks/robot/Client.go
index dc4d22b1f..9b112ca46 100644
--- a/pkg/services/baremetal/client/mocks/robot/Client.go
+++ b/pkg/services/baremetal/client/mocks/robot/Client.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.40.2. DO NOT EDIT.
+// Code generated by mockery v2.53.4. DO NOT EDIT.
package mocks
@@ -254,7 +254,7 @@ func (_c *Client_GetReboot_Call) RunAndReturn(run func(int) (*models.Reset, erro
return _c
}
-// ListBMServers provides a mock function with given fields:
+// ListBMServers provides a mock function with no fields
func (_m *Client) ListBMServers() ([]models.Server, error) {
ret := _m.Called()
@@ -311,7 +311,7 @@ func (_c *Client_ListBMServers_Call) RunAndReturn(run func() ([]models.Server, e
return _c
}
-// ListSSHKeys provides a mock function with given fields:
+// ListSSHKeys provides a mock function with no fields
func (_m *Client) ListSSHKeys() ([]models.Key, error) {
ret := _m.Called()
@@ -604,7 +604,7 @@ func (_c *Client_SetSSHKey_Call) RunAndReturn(run func(string, string) (*models.
return _c
}
-// ValidateCredentials provides a mock function with given fields:
+// ValidateCredentials provides a mock function with no fields
func (_m *Client) ValidateCredentials() error {
ret := _m.Called()
diff --git a/pkg/services/baremetal/client/mocks/robot/Factory.go b/pkg/services/baremetal/client/mocks/robot/Factory.go
index fc9b6f216..1f150a78d 100644
--- a/pkg/services/baremetal/client/mocks/robot/Factory.go
+++ b/pkg/services/baremetal/client/mocks/robot/Factory.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.40.2. DO NOT EDIT.
+// Code generated by mockery v2.53.4. DO NOT EDIT.
package mocks
diff --git a/pkg/services/baremetal/client/mocks/ssh/Client.go b/pkg/services/baremetal/client/mocks/ssh/Client.go
index bd444a586..7f1940076 100644
--- a/pkg/services/baremetal/client/mocks/ssh/Client.go
+++ b/pkg/services/baremetal/client/mocks/ssh/Client.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.40.2. DO NOT EDIT.
+// Code generated by mockery v2.53.4. DO NOT EDIT.
package mocks
@@ -22,7 +22,7 @@ func (_m *Client) EXPECT() *Client_Expecter {
return &Client_Expecter{mock: &_m.Mock}
}
-// CheckCloudInitLogsForSigTerm provides a mock function with given fields:
+// CheckCloudInitLogsForSigTerm provides a mock function with no fields
func (_m *Client) CheckCloudInitLogsForSigTerm() sshclient.Output {
ret := _m.Called()
@@ -124,7 +124,7 @@ func (_c *Client_CheckDisk_Call) RunAndReturn(run func(context.Context, []string
return _c
}
-// CleanCloudInitInstances provides a mock function with given fields:
+// CleanCloudInitInstances provides a mock function with no fields
func (_m *Client) CleanCloudInitInstances() sshclient.Output {
ret := _m.Called()
@@ -169,7 +169,7 @@ func (_c *Client_CleanCloudInitInstances_Call) RunAndReturn(run func() sshclient
return _c
}
-// CleanCloudInitLogs provides a mock function with given fields:
+// CleanCloudInitLogs provides a mock function with no fields
func (_m *Client) CleanCloudInitLogs() sshclient.Output {
ret := _m.Called()
@@ -214,7 +214,7 @@ func (_c *Client_CleanCloudInitLogs_Call) RunAndReturn(run func() sshclient.Outp
return _c
}
-// CloudInitStatus provides a mock function with given fields:
+// CloudInitStatus provides a mock function with no fields
func (_m *Client) CloudInitStatus() sshclient.Output {
ret := _m.Called()
@@ -554,7 +554,7 @@ func (_c *Client_ExecutePreProvisionCommand_Call) RunAndReturn(run func(context.
return _c
}
-// GetCloudInitOutput provides a mock function with given fields:
+// GetCloudInitOutput provides a mock function with no fields
func (_m *Client) GetCloudInitOutput() sshclient.Output {
ret := _m.Called()
@@ -599,7 +599,7 @@ func (_c *Client_GetCloudInitOutput_Call) RunAndReturn(run func() sshclient.Outp
return _c
}
-// GetHardwareDetailsCPUArch provides a mock function with given fields:
+// GetHardwareDetailsCPUArch provides a mock function with no fields
func (_m *Client) GetHardwareDetailsCPUArch() sshclient.Output {
ret := _m.Called()
@@ -644,7 +644,7 @@ func (_c *Client_GetHardwareDetailsCPUArch_Call) RunAndReturn(run func() sshclie
return _c
}
-// GetHardwareDetailsCPUClockGigahertz provides a mock function with given fields:
+// GetHardwareDetailsCPUClockGigahertz provides a mock function with no fields
func (_m *Client) GetHardwareDetailsCPUClockGigahertz() sshclient.Output {
ret := _m.Called()
@@ -689,7 +689,7 @@ func (_c *Client_GetHardwareDetailsCPUClockGigahertz_Call) RunAndReturn(run func
return _c
}
-// GetHardwareDetailsCPUCores provides a mock function with given fields:
+// GetHardwareDetailsCPUCores provides a mock function with no fields
func (_m *Client) GetHardwareDetailsCPUCores() sshclient.Output {
ret := _m.Called()
@@ -734,7 +734,7 @@ func (_c *Client_GetHardwareDetailsCPUCores_Call) RunAndReturn(run func() sshcli
return _c
}
-// GetHardwareDetailsCPUFlags provides a mock function with given fields:
+// GetHardwareDetailsCPUFlags provides a mock function with no fields
func (_m *Client) GetHardwareDetailsCPUFlags() sshclient.Output {
ret := _m.Called()
@@ -779,7 +779,7 @@ func (_c *Client_GetHardwareDetailsCPUFlags_Call) RunAndReturn(run func() sshcli
return _c
}
-// GetHardwareDetailsCPUModel provides a mock function with given fields:
+// GetHardwareDetailsCPUModel provides a mock function with no fields
func (_m *Client) GetHardwareDetailsCPUModel() sshclient.Output {
ret := _m.Called()
@@ -824,7 +824,7 @@ func (_c *Client_GetHardwareDetailsCPUModel_Call) RunAndReturn(run func() sshcli
return _c
}
-// GetHardwareDetailsCPUThreads provides a mock function with given fields:
+// GetHardwareDetailsCPUThreads provides a mock function with no fields
func (_m *Client) GetHardwareDetailsCPUThreads() sshclient.Output {
ret := _m.Called()
@@ -869,7 +869,7 @@ func (_c *Client_GetHardwareDetailsCPUThreads_Call) RunAndReturn(run func() sshc
return _c
}
-// GetHardwareDetailsDebug provides a mock function with given fields:
+// GetHardwareDetailsDebug provides a mock function with no fields
func (_m *Client) GetHardwareDetailsDebug() sshclient.Output {
ret := _m.Called()
@@ -914,7 +914,7 @@ func (_c *Client_GetHardwareDetailsDebug_Call) RunAndReturn(run func() sshclient
return _c
}
-// GetHardwareDetailsNics provides a mock function with given fields:
+// GetHardwareDetailsNics provides a mock function with no fields
func (_m *Client) GetHardwareDetailsNics() sshclient.Output {
ret := _m.Called()
@@ -959,7 +959,7 @@ func (_c *Client_GetHardwareDetailsNics_Call) RunAndReturn(run func() sshclient.
return _c
}
-// GetHardwareDetailsRAM provides a mock function with given fields:
+// GetHardwareDetailsRAM provides a mock function with no fields
func (_m *Client) GetHardwareDetailsRAM() sshclient.Output {
ret := _m.Called()
@@ -1004,7 +1004,7 @@ func (_c *Client_GetHardwareDetailsRAM_Call) RunAndReturn(run func() sshclient.O
return _c
}
-// GetHardwareDetailsStorage provides a mock function with given fields:
+// GetHardwareDetailsStorage provides a mock function with no fields
func (_m *Client) GetHardwareDetailsStorage() sshclient.Output {
ret := _m.Called()
@@ -1049,7 +1049,7 @@ func (_c *Client_GetHardwareDetailsStorage_Call) RunAndReturn(run func() sshclie
return _c
}
-// GetHostName provides a mock function with given fields:
+// GetHostName provides a mock function with no fields
func (_m *Client) GetHostName() sshclient.Output {
ret := _m.Called()
@@ -1094,7 +1094,7 @@ func (_c *Client_GetHostName_Call) RunAndReturn(run func() sshclient.Output) *Cl
return _c
}
-// GetInstallImageState provides a mock function with given fields:
+// GetInstallImageState provides a mock function with no fields
func (_m *Client) GetInstallImageState() (sshclient.InstallImageState, error) {
ret := _m.Called()
@@ -1149,7 +1149,7 @@ func (_c *Client_GetInstallImageState_Call) RunAndReturn(run func() (sshclient.I
return _c
}
-// GetResultOfInstallImage provides a mock function with given fields:
+// GetResultOfInstallImage provides a mock function with no fields
func (_m *Client) GetResultOfInstallImage() (string, error) {
ret := _m.Called()
@@ -1204,7 +1204,7 @@ func (_c *Client_GetResultOfInstallImage_Call) RunAndReturn(run func() (string,
return _c
}
-// Reboot provides a mock function with given fields:
+// Reboot provides a mock function with no fields
func (_m *Client) Reboot() sshclient.Output {
ret := _m.Called()
@@ -1249,7 +1249,7 @@ func (_c *Client_Reboot_Call) RunAndReturn(run func() sshclient.Output) *Client_
return _c
}
-// ResetKubeadm provides a mock function with given fields:
+// ResetKubeadm provides a mock function with no fields
func (_m *Client) ResetKubeadm() sshclient.Output {
ret := _m.Called()
@@ -1294,7 +1294,137 @@ func (_c *Client_ResetKubeadm_Call) RunAndReturn(run func() sshclient.Output) *C
return _c
}
-// UntarTGZ provides a mock function with given fields:
+// StartImageURLCommand provides a mock function with given fields: ctx, command, imageURL, bootstrapData, machineName, deviceNames
+func (_m *Client) StartImageURLCommand(ctx context.Context, command string, imageURL string, bootstrapData []byte, machineName string, deviceNames []string) (int, string, error) {
+ ret := _m.Called(ctx, command, imageURL, bootstrapData, machineName, deviceNames)
+
+ if len(ret) == 0 {
+ panic("no return value specified for StartImageURLCommand")
+ }
+
+ var r0 int
+ var r1 string
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []byte, string, []string) (int, string, error)); ok {
+ return rf(ctx, command, imageURL, bootstrapData, machineName, deviceNames)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []byte, string, []string) int); ok {
+ r0 = rf(ctx, command, imageURL, bootstrapData, machineName, deviceNames)
+ } else {
+ r0 = ret.Get(0).(int)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, []byte, string, []string) string); ok {
+ r1 = rf(ctx, command, imageURL, bootstrapData, machineName, deviceNames)
+ } else {
+ r1 = ret.Get(1).(string)
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, string, []byte, string, []string) error); ok {
+ r2 = rf(ctx, command, imageURL, bootstrapData, machineName, deviceNames)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// Client_StartImageURLCommand_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StartImageURLCommand'
+type Client_StartImageURLCommand_Call struct {
+ *mock.Call
+}
+
+// StartImageURLCommand is a helper method to define mock.On call
+// - ctx context.Context
+// - command string
+// - imageURL string
+// - bootstrapData []byte
+// - machineName string
+// - deviceNames []string
+func (_e *Client_Expecter) StartImageURLCommand(ctx interface{}, command interface{}, imageURL interface{}, bootstrapData interface{}, machineName interface{}, deviceNames interface{}) *Client_StartImageURLCommand_Call {
+ return &Client_StartImageURLCommand_Call{Call: _e.mock.On("StartImageURLCommand", ctx, command, imageURL, bootstrapData, machineName, deviceNames)}
+}
+
+func (_c *Client_StartImageURLCommand_Call) Run(run func(ctx context.Context, command string, imageURL string, bootstrapData []byte, machineName string, deviceNames []string)) *Client_StartImageURLCommand_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].([]byte), args[4].(string), args[5].([]string))
+ })
+ return _c
+}
+
+func (_c *Client_StartImageURLCommand_Call) Return(exitStatus int, stdoutAndStderr string, err error) *Client_StartImageURLCommand_Call {
+ _c.Call.Return(exitStatus, stdoutAndStderr, err)
+ return _c
+}
+
+func (_c *Client_StartImageURLCommand_Call) RunAndReturn(run func(context.Context, string, string, []byte, string, []string) (int, string, error)) *Client_StartImageURLCommand_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// StateOfImageURLCommand provides a mock function with no fields
+func (_m *Client) StateOfImageURLCommand() (sshclient.ImageURLCommandState, string, error) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for StateOfImageURLCommand")
+ }
+
+ var r0 sshclient.ImageURLCommandState
+ var r1 string
+ var r2 error
+ if rf, ok := ret.Get(0).(func() (sshclient.ImageURLCommandState, string, error)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() sshclient.ImageURLCommandState); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(sshclient.ImageURLCommandState)
+ }
+
+ if rf, ok := ret.Get(1).(func() string); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Get(1).(string)
+ }
+
+ if rf, ok := ret.Get(2).(func() error); ok {
+ r2 = rf()
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// Client_StateOfImageURLCommand_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StateOfImageURLCommand'
+type Client_StateOfImageURLCommand_Call struct {
+ *mock.Call
+}
+
+// StateOfImageURLCommand is a helper method to define mock.On call
+func (_e *Client_Expecter) StateOfImageURLCommand() *Client_StateOfImageURLCommand_Call {
+ return &Client_StateOfImageURLCommand_Call{Call: _e.mock.On("StateOfImageURLCommand")}
+}
+
+func (_c *Client_StateOfImageURLCommand_Call) Run(run func()) *Client_StateOfImageURLCommand_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Client_StateOfImageURLCommand_Call) Return(state sshclient.ImageURLCommandState, logFile string, err error) *Client_StateOfImageURLCommand_Call {
+ _c.Call.Return(state, logFile, err)
+ return _c
+}
+
+func (_c *Client_StateOfImageURLCommand_Call) RunAndReturn(run func() (sshclient.ImageURLCommandState, string, error)) *Client_StateOfImageURLCommand_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UntarTGZ provides a mock function with no fields
func (_m *Client) UntarTGZ() sshclient.Output {
ret := _m.Called()
diff --git a/pkg/services/baremetal/client/mocks/ssh/Factory.go b/pkg/services/baremetal/client/mocks/ssh/Factory.go
index f18f8f3c9..7fe2897ee 100644
--- a/pkg/services/baremetal/client/mocks/ssh/Factory.go
+++ b/pkg/services/baremetal/client/mocks/ssh/Factory.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.40.2. DO NOT EDIT.
+// Code generated by mockery v2.53.4. DO NOT EDIT.
package mocks
diff --git a/pkg/services/baremetal/client/ssh/ssh_client.go b/pkg/services/baremetal/client/ssh/ssh_client.go
index 5282d2b7b..23ec55210 100644
--- a/pkg/services/baremetal/client/ssh/ssh_client.go
+++ b/pkg/services/baremetal/client/ssh/ssh_client.go
@@ -30,6 +30,7 @@ import (
"regexp"
"slices"
"strings"
+ "syscall"
"time"
scp "github.com/bramvdbogaerde/go-scp"
@@ -39,6 +40,8 @@ import (
const (
sshTimeOut time.Duration = 5 * time.Second
+
+ imageURLCommandLog = "/root/image-url-command.log"
)
//go:embed detect-linux-on-another-disk.sh
@@ -62,8 +65,6 @@ var (
// ErrCommandExitedWithStatusOne means the ssh command exited with sttatus 1.
ErrCommandExitedWithStatusOne = errors.New("Process exited with status 1") //nolint:stylecheck // this is used to check ssh errors
- // ErrConnectionRefused means the ssh connection was refused.
- ErrConnectionRefused = errors.New("connect: connection refused")
// ErrAuthenticationFailed means ssh was unable to authenticate.
ErrAuthenticationFailed = errors.New("ssh: unable to authenticate")
// ErrEmptyStdOut means that StdOut equals empty string.
@@ -72,7 +73,6 @@ var (
ErrTimeout = errors.New("i/o timeout")
// ErrCheckDiskBrokenDisk means that a disk seams broken.
ErrCheckDiskBrokenDisk = errors.New("CheckDisk failed")
- errSSHDialFailed = errors.New("failed to dial ssh")
)
// Input defines an SSH input.
@@ -101,6 +101,23 @@ const (
InstallImageStateFinished InstallImageState = "finished"
)
+// ImageURLCommandState is the command which reads the imageURL of and provisions the machine accordingly. It gets copied to the server running in the rescue system.
+type ImageURLCommandState string
+
+const (
+ // ImageURLCommandStateNotStarted indicates that the command was not started yet.
+ ImageURLCommandStateNotStarted ImageURLCommandState = "ImageURLCommandStateNotStarted"
+
+ // ImageURLCommandStateRunning indicates that the command is running.
+ ImageURLCommandStateRunning ImageURLCommandState = "ImageURLCommandStateRunning"
+
+ // ImageURLCommandStateFinishedSuccessfully indicates that the command is finished successfully.
+ ImageURLCommandStateFinishedSuccessfully ImageURLCommandState = "ImageURLCommandStateFinishedSuccessfully"
+
+ // ImageURLCommandStateFailed indicates that the command is finished, but failed.
+ ImageURLCommandStateFailed ImageURLCommandState = "ImageURLCommandStateFailed"
+)
+
func (o Output) String() string {
s := make([]string, 0, 3)
stdout := strings.TrimSpace(o.StdOut)
@@ -129,7 +146,7 @@ func (o Output) String() string {
// There are three case:
// First case: Remote command finished with exit 0: 0, nil.
// Second case: Remote command finished with non zero: N, nil.
-// Third case: Remote command was not called successfully (like host not reachable): 0, err.
+// Third case: Remote command was not called successfully (like "host not reachable"): 0, err.
func (o Output) ExitStatus() (int, error) {
var exitError *ssh.ExitError
if errors.As(o.Err, &exitError) {
@@ -178,6 +195,19 @@ type Client interface {
// ExecutePreProvisionCommand executes a command before the provision process starts.
// A non-zero exit status will indicate that provisioning should not start.
ExecutePreProvisionCommand(ctx context.Context, preProvisionCommand string) (exitStatus int, stdoutAndStderr string, err error)
+
+ // StartImageURLCommand calls the command provided via image-url-command.
+ // It gets called by the controller after the rescue system of the new machine
+ // is reachable. The env var `OCI_REGISTRY_AUTH_TOKEN` gets set to the same value of the
+ // corresponding env var of the controller.
+ // This gets used when imageURL set.
+ // For hcloud deviceNames is always {"sda"}. For baremetal it corresponds to the WWNs
+ // of RootDeviceHints.
+ StartImageURLCommand(ctx context.Context, command, imageURL string, bootstrapData []byte, machineName string, deviceNames []string) (exitStatus int, stdoutAndStderr string, err error)
+
+ // StateOfImageURLCommand returns the current states of the ImageURLCommand. States can
+ // be: NotStarted, Running, Failed, FinishedSuccesfully.
+ StateOfImageURLCommand() (state ImageURLCommandState, logFile string, err error)
}
// Factory is the interface for creating new Client objects.
@@ -406,11 +436,7 @@ func (c *sshClient) Reboot() Output {
// CloudInitStatus implements the CloudInitStatus method of the SSHClient interface.
func (c *sshClient) CloudInitStatus() Output {
- out := c.runSSH("cloud-init status")
- if out.Err != nil && strings.Contains(out.Err.Error(), ErrCommandExitedWithStatusOne.Error()) {
- return Output{StdOut: "status: error"}
- }
- return out
+ return c.runSSH("cloud-init status")
}
// CheckCloudInitLogsForSigTerm implements the CheckCloudInitLogsForSigTerm method of the SSHClient interface.
@@ -547,7 +573,7 @@ func (c *sshClient) UntarTGZ() Output {
// IsConnectionRefusedError checks whether the ssh error is a connection refused error.
func IsConnectionRefusedError(err error) bool {
- return strings.Contains(err.Error(), ErrConnectionRefused.Error())
+ return errors.Is(err, syscall.ECONNREFUSED)
}
// IsAuthenticationFailedError checks whether the ssh error is an authentication failed error.
@@ -585,7 +611,7 @@ func (c *sshClient) getSSHClient() (*ssh.Client, error) {
// Connect to the remote server and perform the SSH handshake.
client, err := ssh.Dial("tcp", fmt.Sprintf("%s:%v", c.ip, c.port), config)
if err != nil {
- return nil, fmt.Errorf("failed to dial ssh. Error message: %s. DialErr: %w", err.Error(), errSSHDialFailed)
+ return nil, fmt.Errorf("failed to dial ssh. DialErr: %w", err)
}
return client, nil
@@ -703,3 +729,123 @@ func (c *sshClient) ExecutePreProvisionCommand(ctx context.Context, command stri
return exitStatus, s, nil
}
+
+func (c *sshClient) StartImageURLCommand(ctx context.Context, command, imageURL string, bootstrapData []byte, machineName string, deviceNames []string) (int, string, error) {
+ // validate deviceNames
+ for _, dn := range deviceNames {
+ if strings.Contains(dn, "/") {
+ return 0, "", fmt.Errorf("deviceName must not contain a slash (example: only sda not /dev/sda): %q", dn)
+ }
+ if strings.Contains(dn, " ") {
+ return 0, "", fmt.Errorf("deviceName must not contain spaces: %q", dn)
+ }
+ if dn == "" {
+ return 0, "", errors.New("deviceName must not be empty")
+ }
+ }
+ client, err := c.getSSHClient()
+ if err != nil {
+ return 0, "", err
+ }
+ defer client.Close()
+
+ scpClient, err := scp.NewClientBySSH(client)
+ if err != nil {
+ return 0, "", fmt.Errorf("couldn't create a new scp client: %w", err)
+ }
+
+ defer scpClient.Close()
+
+ if command == "" {
+ return 0, "", fmt.Errorf("image-url-command is empty")
+ }
+
+ fdCommand, err := os.Open(command) //nolint:gosec // the variable was valided.
+ if err != nil {
+ return 0, "", fmt.Errorf("error opening image-url-command %q: %w", command, err)
+ }
+ defer fdCommand.Close()
+
+ baseName := "image-url-command"
+ dest := "/root/" + baseName
+ err = scpClient.CopyFromFile(ctx, *fdCommand, dest, "0700")
+ if err != nil {
+ return 0, "", fmt.Errorf("error copying file %q to %s:%d:%s %w", command, c.ip, c.port, dest, err)
+ }
+
+ reader := bytes.NewReader(bootstrapData)
+ dest = "/root/bootstrap.data"
+ err = scpClient.CopyFile(ctx, reader, dest, "0700")
+ if err != nil {
+ return 0, "", fmt.Errorf("error copying boostrap data to %s:%d:%s %w", c.ip, c.port, dest, err)
+ }
+
+ cmd := fmt.Sprintf(`#!/usr/bin/bash
+OCI_REGISTRY_AUTH_TOKEN='%s' nohup /root/image-url-command '%s' /root/bootstrap.data '%s' '%s' >%s 2>&1 /root/image-url-command.pid
+`, os.Getenv("OCI_REGISTRY_AUTH_TOKEN"), imageURL, machineName, strings.Join(deviceNames, " "),
+ imageURLCommandLog)
+
+ out := c.runSSH(cmd)
+
+ exitStatus, err := out.ExitStatus()
+ if err != nil {
+ return 0, "", fmt.Errorf("error executing %q on %s:%d: %w", dest, c.ip, c.port, err)
+ }
+
+ s := out.StdOut + "\n" + out.StdErr
+ s = strings.TrimSpace(s)
+
+ return exitStatus, s, nil
+}
+
+func (c *sshClient) StateOfImageURLCommand() (state ImageURLCommandState, stdoutStderr string, err error) {
+ out := c.runSSH(`[ -e /root/image-url-command.pid ]`)
+ exitStatus, err := out.ExitStatus()
+ if err != nil {
+ return ImageURLCommandStateNotStarted, "", fmt.Errorf("getting exit status of image-url-command failed: %w", err)
+ }
+ if exitStatus > 0 {
+ // file does exists
+ return ImageURLCommandStateNotStarted, "", nil
+ }
+
+ out = c.runSSH(`ps -p "$(cat /root/image-url-command.pid)" -o args= | grep -q image-url-command`)
+ exitStatus, err = out.ExitStatus()
+ if err != nil {
+ return ImageURLCommandStateNotStarted, "", fmt.Errorf("detecting if image-url-command is still running failed: %w", err)
+ }
+
+ logFile, err := c.getImageURLCommandOutput()
+ if err != nil {
+ return ImageURLCommandStateFailed, logFile, err
+ }
+
+ if exitStatus == 0 {
+ return ImageURLCommandStateRunning, logFile, nil
+ }
+
+ out = c.runSSH(fmt.Sprintf("tail -n 1 %s | grep -q IMAGE_URL_DONE", imageURLCommandLog))
+ exitStatus, err = out.ExitStatus()
+ if err != nil {
+ return ImageURLCommandStateNotStarted, logFile, fmt.Errorf("detecting if image-url-command was successful failed: %w", err)
+ }
+
+ if exitStatus > 0 {
+ return ImageURLCommandStateFailed,
+ fmt.Sprintf("IMAGE_URL_DONE not found in %s:\n%s", imageURLCommandLog, logFile), nil
+ }
+ return ImageURLCommandStateFinishedSuccessfully, logFile, nil
+}
+
+func (c *sshClient) getImageURLCommandOutput() (string, error) {
+ out := c.runSSH(fmt.Sprintf("cat %s", imageURLCommandLog)) // TODO: implement getFile for sshClient.
+ exitStatus, err := out.ExitStatus()
+ if err != nil {
+ return "", fmt.Errorf("getting logs of image-url-command failed: %w", err)
+ }
+ if exitStatus > 0 {
+ return "", fmt.Errorf("getting logs of image-url-command failed. Non zero status of 'cat'")
+ }
+ return out.StdOut, nil
+}
diff --git a/pkg/services/baremetal/host/host.go b/pkg/services/baremetal/host/host.go
index 84156177b..52c8be2ab 100644
--- a/pkg/services/baremetal/host/host.go
+++ b/pkg/services/baremetal/host/host.go
@@ -37,6 +37,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/record"
ctrl "sigs.k8s.io/controller-runtime"
@@ -190,6 +191,14 @@ func (s *Service) actionPreparing(ctx context.Context) actionResult {
s.scope.HetznerBareMetalHost.SetError(infrav1.PermanentError, msg)
return actionStop{}
}
+ if errors.Is(err, os.ErrDeadlineExceeded) {
+ // If the Hetzner API returns this, we just want to retry later:
+ // Get "https://robot-ws.your-server.de/server/1234": net/http: TLS handshake timeout
+ s.scope.Logger.Info("GetBMServer timed out, will retry later", "error", err)
+ return actionContinue{
+ delay: 10 * time.Second,
+ }
+ }
return actionError{err: fmt.Errorf("failed to get bare metal server: %w", err)}
}
@@ -238,25 +247,29 @@ func (s *Service) actionPreparing(ctx context.Context) actionResult {
return actionError{err: fmt.Errorf("failed to enforce rescue mode: %w", err)}
}
- sshClient := s.scope.SSHClientFactory.NewClient(sshclient.Input{
- PrivateKey: sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.SecretRef).PrivateKey,
- Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterCloudInit,
- IP: s.scope.HetznerBareMetalHost.Spec.Status.GetIPAddress(),
- })
+ if s.scope.SSHAfterInstallImage {
+ // We have ssh access to running nodes. Maybe we can reboot via ssh instead of
+ // using the robot API.
+ sshClient := s.scope.SSHClientFactory.NewClient(sshclient.Input{
+ PrivateKey: sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.SecretRef).PrivateKey,
+ Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterInstallImage,
+ IP: s.scope.HetznerBareMetalHost.Spec.Status.GetIPAddress(),
+ })
- // Check hostname with sshClient
- out := sshClient.GetHostName()
- if trimLineBreak(out.StdOut) != "" {
- // we managed access with ssh - we can do an ssh reboot
- if err := handleSSHError(sshClient.Reboot()); err != nil {
- return actionError{err: fmt.Errorf("failed to reboot server via ssh (actionPreparing): %w", err)}
+ // Check hostname with sshClient
+ out := sshClient.GetHostName()
+ if trimLineBreak(out.StdOut) != "" {
+ // we managed access with ssh - we can do an ssh reboot
+ if err := handleSSHError(sshClient.Reboot()); err != nil {
+ return actionError{err: fmt.Errorf("failed to reboot server via ssh (actionPreparing): %w", err)}
+ }
+ msg := "Rebooting into rescue mode."
+ createSSHRebootEvent(ctx, s.scope.HetznerBareMetalHost, msg)
+ // we immediately set an error message in the host status to track the reboot we just performed
+ s.scope.HetznerBareMetalHost.SetError(infrav1.ErrorTypeSSHRebootTriggered, fmt.Sprintf("Phase %s, reboot via ssh: %s",
+ s.scope.HetznerBareMetalHost.Spec.Status.ProvisioningState, msg))
+ return actionComplete{} // next: Registering
}
- msg := "Rebooting into rescue mode."
- createSSHRebootEvent(ctx, s.scope.HetznerBareMetalHost, msg)
- // we immediately set an error message in the host status to track the reboot we just performed
- s.scope.HetznerBareMetalHost.SetError(infrav1.ErrorTypeSSHRebootTriggered, fmt.Sprintf("Phase %s, reboot via ssh: %s",
- s.scope.HetznerBareMetalHost.Spec.Status.ProvisioningState, msg))
- return actionComplete{} // next: Registering
}
// Check if software reboot is available. If it is not, choose hardware reboot.
@@ -622,7 +635,7 @@ func (s *Service) actionRegistering(ctx context.Context) actionResult {
timeSinceReboot := "unknown"
if s.scope.HetznerBareMetalHost.Spec.Status.LastUpdated != nil {
- timeSinceReboot = time.Since(s.scope.HetznerBareMetalHost.Spec.Status.LastUpdated.Time).String()
+ timeSinceReboot = time.Since(s.scope.HetznerBareMetalHost.Spec.Status.LastUpdated.Time).Round(time.Second).String()
}
s.scope.Logger.Info("Could not reach rescue system. Will retry some seconds later.", "out", out.String(), "hostName", hostName,
@@ -859,6 +872,7 @@ func obtainHardwareDetailsNics(sshClient sshclient.Client) ([]infrav1.NIC, error
stringArray := strings.Split(stdOut, "\n")
nicsArray := make([]infrav1.NIC, 0, len(stringArray))
+ ipFound := false
for _, str := range stringArray {
validJSONString := validJSONFromSSHOutput(str)
@@ -883,6 +897,15 @@ func obtainHardwareDetailsNics(sshClient sshclient.Client) ([]infrav1.NIC, error
IP: nic.IP,
SpeedMbps: speedMbps,
})
+
+ if nic.IP != "" {
+ ipFound = true
+ }
+ }
+ // if no IP was found, we return an error
+ // See nodeAddresses()
+ if !ipFound {
+ return nil, fmt.Errorf("no IP found in NICs: %+v", nicsArray)
}
return nicsArray, nil
@@ -1172,6 +1195,9 @@ func (s *Service) actionImageInstalling(ctx context.Context) actionResult {
return actionStop{}
}
+ if s.scope.HetznerBareMetalHost.Spec.Status.InstallImage.Image.UseCustomImageURLCommand {
+ return s.actionImageInstallingCustomImageURLCommand(ctx, sshClient)
+ }
state, err := sshClient.GetInstallImageState()
if err != nil {
return actionError{err: fmt.Errorf("failed to get state of installimage processes: %w", err)}
@@ -1192,6 +1218,133 @@ func (s *Service) actionImageInstalling(ctx context.Context) actionResult {
}
}
+func (s *Service) actionImageInstallingCustomImageURLCommand(ctx context.Context, sshClient sshclient.Client) actionResult {
+ host := s.scope.HetznerBareMetalHost
+
+ state, logFile, err := sshClient.StateOfImageURLCommand()
+ if err != nil {
+ return actionError{err: fmt.Errorf("StateOfImageURLCommand failed: %w", err)}
+ }
+
+ duration := time.Since(host.Spec.Status.LastUpdated.Time)
+ // Please keep the number (7) in sync with the docstring of ImageURL.
+ if duration > 7*time.Minute {
+ // timeout. Something has failed.
+ msg := fmt.Sprintf("ImageURLCommand timed out after %s. Deleting machine",
+ duration.Round(time.Second).String())
+ s.scope.Logger.Error(nil, msg, "logFile", logFile)
+ conditions.MarkFalse(host, infrav1.ProvisionSucceededCondition,
+ "ImageURLCommandTimedOut", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return s.recordActionFailure(infrav1.FatalError, msg)
+ }
+
+ switch state {
+ case sshclient.ImageURLCommandStateRunning:
+ return actionContinue{delay: 10 * time.Second}
+
+ case sshclient.ImageURLCommandStateFinishedSuccessfully:
+ record.Event(s.scope.HetznerBareMetalHost, "ImageURLCommandOutput", logFile)
+ s.scope.Logger.Info("ImageURLCommandOutput", "logFile", logFile)
+
+ // Update name in robot API
+ if _, err := s.scope.RobotClient.SetBMServerName(s.scope.HetznerBareMetalHost.Spec.ServerID, s.scope.Hostname()); err != nil {
+ record.Warn(s.scope.HetznerBareMetalHost, "SetBMServerNameFailed", err.Error())
+ s.handleRobotRateLimitExceeded(err, "SetBMServerName")
+ return actionError{err: fmt.Errorf("failed to update name of host in robot API: %w", err)}
+ }
+
+ // Reboot via SSH
+ if err := sshClient.Reboot().Err; err != nil {
+ err = fmt.Errorf("failed to reboot server (after install-image): %w", err)
+ record.Warn(s.scope.HetznerBareMetalHost, "RebootFailed", err.Error())
+ return actionError{err: err}
+ }
+
+ msg := "machine image and cloud-init data got installed (via image-url-command)"
+ createSSHRebootEvent(ctx, s.scope.HetznerBareMetalHost, msg)
+
+ // clear potential errors - all done
+ s.scope.HetznerBareMetalHost.ClearError()
+ return actionComplete{}
+
+ case sshclient.ImageURLCommandStateFailed:
+ record.Warn(s.scope.HetznerBareMetalHost, "InstallImageNotSuccessful", logFile)
+ msg := "image-url-command failed"
+ s.scope.Logger.Error(nil, msg, "logFile", logFile)
+ conditions.MarkFalse(host, infrav1.ProvisionSucceededCondition,
+ "ImageURLCommandFailed", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return s.recordActionFailure(infrav1.FatalError, msg)
+
+ case sshclient.ImageURLCommandStateNotStarted:
+ data, err := s.scope.GetRawBootstrapData(ctx)
+ if err != nil {
+ return actionError{err: fmt.Errorf("baremetal GetRawBootstrapData failed: %w", err)}
+ }
+
+ if s.scope.ImageURLCommand == "" {
+ err = errors.New("internal error: --baremetal-image-url-command is not set?")
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(s.scope.HetznerBareMetalHost, infrav1.ProvisionSucceededCondition,
+ "ImageURLCommandMissing",
+ clusterv1.ConditionSeverityError,
+ "%s", err.Error())
+ // this can only be changed by updating the controller. This will make the
+ // controller reconcile all resources.
+ return actionContinue{delay: time.Hour}
+ }
+
+ // get the information about storage devices again to have the latest names.
+ // Device names can change during restart.
+ storage, err := obtainHardwareDetailsStorage(sshClient)
+ if err != nil {
+ return actionError{err: fmt.Errorf("failed to obtain hardware details storage: %w", err)}
+ }
+
+ // get device names from storage device
+ deviceNames := getDeviceNames(s.scope.HetznerBareMetalHost.Spec.RootDeviceHints.ListOfWWN(), storage)
+
+ exitStatus, stdoutStderr, err := sshClient.StartImageURLCommand(ctx, s.scope.ImageURLCommand, s.scope.HetznerBareMetalHost.Spec.Status.InstallImage.Image.URL, data, s.scope.Hostname(), deviceNames)
+ if err != nil {
+ err := fmt.Errorf("StartImageURLCommand failed (retrying): %w", err)
+ // This could be a temporary network error. Retry.
+ s.scope.Logger.Error(err, "",
+ "ImageURLCommand", s.scope.ImageURLCommand,
+ "exitStatus", exitStatus,
+ "stdoutStderr", stdoutStderr)
+ conditions.MarkFalse(s.scope.HetznerBareMetalHost, infrav1.ProvisionSucceededCondition,
+ "ImageURLCommandFailedToStart",
+ clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return actionError{err: err}
+ }
+
+ if exitStatus != 0 {
+ msg := "StartImageURLCommand failed with non-zero exit status. Deleting machine"
+ s.scope.Logger.Error(nil, msg,
+ "ImageURLCommand", s.scope.ImageURLCommand,
+ "exitStatus", exitStatus,
+ "stdoutStderr", stdoutStderr)
+ conditions.MarkFalse(s.scope.HetznerBareMetalHost, infrav1.ProvisionSucceededCondition,
+ "StartImageURLCommandFailed",
+ clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return s.recordActionFailure(infrav1.ProvisioningError, msg)
+ }
+
+ conditions.MarkFalse(s.scope.HetznerBareMetalHost, infrav1.ProvisionSucceededCondition,
+ "ImageURLCommandStarted",
+ clusterv1.ConditionSeverityInfo,
+ "baremetal-image-url-command started")
+
+ return actionContinue{delay: 55 * time.Second}
+
+ default:
+ return actionError{err: fmt.Errorf("unknown ImageURLCommandState: %q", state)}
+ }
+}
+
func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Context, sshClient sshclient.Client) actionResult {
// CheckDisk before accessing the disk
info, err := sshClient.CheckDisk(ctx, s.scope.HetznerBareMetalHost.Spec.RootDeviceHints.ListOfWWN())
@@ -1403,6 +1556,14 @@ func (s *Service) actionImageInstallingFinished(ctx context.Context, sshClient s
// Update name in robot API
if _, err := s.scope.RobotClient.SetBMServerName(s.scope.HetznerBareMetalHost.Spec.ServerID, s.scope.Hostname()); err != nil {
+ if errors.Is(err, os.ErrDeadlineExceeded) {
+ // If the Hetzner API returns this, we just want to retry later:
+ // Post "https://robot-ws.your-server.de/server/1234": net/http: TLS handshake timeout
+ s.scope.Logger.Info("SetBMServerName timed out, will retry later", "error", err)
+ return actionContinue{
+ delay: 10 * time.Second,
+ }
+ }
record.Warn(s.scope.HetznerBareMetalHost, "SetBMServerNameFailed", err.Error())
s.handleRobotRateLimitExceeded(err, "SetBMServerName")
return actionError{err: fmt.Errorf("failed to update name of host in robot API: %w", err)}
@@ -1454,6 +1615,7 @@ func (s *Service) createAutoSetupInput(sshClient sshclient.Client) (autoSetupInp
}
// get the information about storage devices again to have the latest names which are then taken for installimage
+ // Device names can change during restart.
storage, err := obtainHardwareDetailsStorage(sshClient)
if err != nil {
return autoSetupInput{}, actionError{err: fmt.Errorf("failed to obtain hardware details storage: %w", err)}
@@ -1561,9 +1723,19 @@ func verifyConnectionRefused(sshClient sshclient.Client, port int) bool {
// next: Provisioned
func (s *Service) actionEnsureProvisioned(ctx context.Context) (ar actionResult) {
markProvisionPending(s.scope.HetznerBareMetalHost, infrav1.StateEnsureProvisioned)
+
+ if !s.scope.SSHAfterInstallImage {
+ // Command line argument `--baremetal-ssh-after-install-image=false` was used.
+ // This mean we do not connect via ssh to the machine after the image got installed.
+ record.Event(s.scope.HetznerBareMetalHost, "ServerProvisioned", "server successfully provisioned ('ensure-provisioned' was skipped)")
+ conditions.MarkTrue(s.scope.HetznerBareMetalHost, infrav1.ProvisionSucceededCondition)
+ s.scope.HetznerBareMetalHost.ClearError()
+ return actionComplete{}
+ }
+
sshClient := s.scope.SSHClientFactory.NewClient(sshclient.Input{
PrivateKey: sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.SecretRef).PrivateKey,
- Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterCloudInit,
+ Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterInstallImage,
IP: s.scope.HetznerBareMetalHost.Spec.Status.GetIPAddress(),
})
@@ -1586,14 +1758,7 @@ func (s *Service) actionEnsureProvisioned(ctx context.Context) (ar actionResult)
record.Warnf(s.scope.HetznerBareMetalHost, "UnexpectedHostName",
"EnsureProvision: wanted %q. %s", wantHostName, err.Error())
}
- return actionError{err: fmt.Errorf("failed to handle incomplete boot - provisioning: %w", err)}
- }
- // A connection failed error could mean that cloud init is still running (if cloudInit introduces a new port)
- if isSSHConnectionRefusedError {
- if actionRes := s.handleConnectionRefused(ctx); actionRes != nil {
- s.scope.Logger.Info("ensureProvisioned: ConnectionRefused", "actionResult", actionRes)
- return actionRes
- }
+ return actionError{err: fmt.Errorf("failed to handle incomplete boot - actionEnsureProvisioned: %w", err)}
}
failed, err := s.handleIncompleteBoot(ctx, false, isTimeout, isSSHConnectionRefusedError)
@@ -1612,50 +1777,49 @@ func (s *Service) actionEnsureProvisioned(ctx context.Context) (ar actionResult)
createEventWithCloudInitOutput := func(ar actionResult) actionResult {
// Create an Event which contains the cloud-init-output.
var err error
- errMsg := ""
- f := record.Warnf
switch v := ar.(type) {
case actionContinue:
// Do not create and event containing the output, wait until finished.
return ar
case actionComplete:
- f = record.Eventf
+ err = nil
case actionError:
err = v.err
- errMsg = fmt.Sprintf(" (%s)", v.err.Error())
+ default:
+ s.scope.Logger.Info("Unhandled type of actionResult",
+ "actionResult", ar)
}
out := sshClient.GetCloudInitOutput()
- if out.Err != nil || out.StdErr != "" {
+ exitStatus, exitError := out.ExitStatus()
+ if exitError != nil {
+ return actionError{err: fmt.Errorf("failed to get cloud init output (ssh connection failed): %w", errors.Join(exitError, err))}
+ }
+ if exitStatus != 0 || out.StdErr != "" {
+ err = errors.Join(err, fmt.Errorf("failed to get cloud init output (ssh connection worked): %s",
+ out.String()))
+ }
+ if err != nil {
record.Warnf(s.scope.HetznerBareMetalHost, "GetCloudInitOutputFailed",
- "GetCloudInitOutput failed to get /var/log/cloud-init-output.log: stdout %q, stderr %q, err %q",
- out.StdOut, out.StdErr, out.Err.Error())
- if err != nil {
- return actionError{err: fmt.Errorf("failed to get cloud init output: %w, while handling: %w", out.Err, err)}
- }
+ "GetCloudInitOutput failed to get /var/log/cloud-init-output.log: %s",
+ err)
return actionError{err: fmt.Errorf("failed to get cloud init output: %w", err)}
}
- f(s.scope.HetznerBareMetalHost, "CloudInitOutput", "cloud init output%s:\n%s",
- errMsg,
+ record.Eventf(s.scope.HetznerBareMetalHost, "CloudInitOutput", "cloud init output:\n%s",
out.StdOut)
return ar
}
// Check the status of cloud init
- actResult, msg, _ := s.checkCloudInitStatus(ctx, sshClient)
+ actResult, msg := s.checkCloudInitStatus(ctx, sshClient)
if _, complete := actResult.(actionComplete); !complete {
record.Event(s.scope.HetznerBareMetalHost, "CloudInitStillRunning", msg)
return createEventWithCloudInitOutput(actResult)
}
- // Check whether cloud init did not run successfully even though it shows "done"
- // Check this only when the port did not change. Because if it did, then we can already confirm at this point
- // that the change worked and the new port is usable. This is a strong enough indication for us to assume cloud init worked.
- if s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterInstallImage == s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterCloudInit {
- actResult = s.handleCloudInitNotStarted(ctx)
- if _, complete := actResult.(actionComplete); !complete {
- s.scope.Logger.Info("ensureProvisioned: handleCloudInitNotStarted", "actResult", actResult)
- return createEventWithCloudInitOutput(actResult)
- }
+ actResult = s.handleCloudInitNotStarted(ctx)
+ if _, complete := actResult.(actionComplete); !complete {
+ s.scope.Logger.Info("ensureProvisioned: handleCloudInitNotStarted", "actResult", actResult)
+ return createEventWithCloudInitOutput(actResult)
}
record.Event(s.scope.HetznerBareMetalHost, "ServerProvisioned", "server successfully provisioned")
@@ -1664,74 +1828,51 @@ func (s *Service) actionEnsureProvisioned(ctx context.Context) (ar actionResult)
return createEventWithCloudInitOutput(actionComplete{})
}
-// handleConnectionRefused checks cloud init status via ssh to the old ssh port if the new ssh port
-// gave a connection refused error.
-func (s *Service) handleConnectionRefused(ctx context.Context) actionResult {
- // Nothing to do if ports didn't change.
- if s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterInstallImage == s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterCloudInit {
- return nil
- }
- oldSSHClient := s.scope.SSHClientFactory.NewClient(sshclient.Input{
- PrivateKey: sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.SecretRef).PrivateKey,
- Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterInstallImage,
- IP: s.scope.HetznerBareMetalHost.Spec.Status.GetIPAddress(),
- })
- actResult, _, err := s.checkCloudInitStatus(ctx, oldSSHClient)
- // If this ssh client also gives an error, then we go back to analyzing the error of the first ssh call
- // This happens in the statement below this one.
- if err == nil {
- // If cloud-init status == "done" and cloud init was successful,
- // then we will soon reboot and be able to access the server via the new port
- if _, complete := actResult.(actionComplete); complete {
- // Check whether cloud init did not run successfully even though it shows "done"
- actResult := s.handleCloudInitNotStarted(ctx)
- if _, complete := actResult.(actionComplete); complete {
- return actionContinue{delay: 10 * time.Second}
- }
- return actResult
- }
- }
- if _, actionerr := actResult.(actionError); !actionerr {
- return actResult
+func (s *Service) checkCloudInitStatus(ctx context.Context, sshClient sshclient.Client) (actionResult, string) {
+ out := sshClient.CloudInitStatus()
+
+ status, err := out.ExitStatus()
+ if err != nil {
+ err = fmt.Errorf("getting CloudInitStatus failed (ssh connection failed): %w", err)
+ return actionContinue{delay: 5 * time.Second}, err.Error()
}
- return nil
-}
-func (s *Service) checkCloudInitStatus(ctx context.Context, sshClient sshclient.Client) (actionResult, string, error) {
- out := sshClient.CloudInitStatus()
- // This error is interesting for further logic and might happen because of the fact that the sshClient has the wrong port
- if out.Err != nil {
- return actionError{err: fmt.Errorf("failed to get cloud init status: %w", out.Err)}, "", out.Err
+ if status != 0 {
+ err = fmt.Errorf("command of CloudInitStatus failed (ssh connection worked): %s",
+ out.String())
+ return actionError{err: err}, err.Error()
}
stdOut := trimLineBreak(out.StdOut)
switch {
case strings.Contains(stdOut, "status: running"):
// Cloud init is still running
- return actionContinue{delay: 5 * time.Second}, "cloud-init still running", nil
+ return actionContinue{delay: 5 * time.Second}, "cloud-init still running"
+
case strings.Contains(stdOut, "status: disabled"):
// Reboot needs to be triggered again - did not start yet
out = sshClient.Reboot()
msg := "cloud-init-status was 'disabled'"
if err := handleSSHError(out); err != nil {
- return actionError{err: fmt.Errorf("failed to reboot (%s): %w", msg, err)}, "", nil
+ return actionError{err: fmt.Errorf("failed to reboot (%s): %w", msg, err)}, ""
}
createSSHRebootEvent(ctx, s.scope.HetznerBareMetalHost, msg)
s.scope.HetznerBareMetalHost.SetError(infrav1.ErrorTypeSSHRebootTriggered, "ssh reboot just triggered")
record.Warn(s.scope.HetznerBareMetalHost, "SSHRebootAfterCloudInitStatusDisabled", msg)
- return actionContinue{delay: 5 * time.Second}, "cloud-init was disabled. Triggered a reboot again", nil
+ return actionContinue{delay: 5 * time.Second}, "cloud-init was disabled. Triggered a reboot again"
+
case strings.Contains(stdOut, "status: done"):
s.scope.HetznerBareMetalHost.ClearError()
- return actionComplete{}, "cloud-init is done", nil
+ return actionComplete{}, "cloud-init is done"
+
case strings.Contains(stdOut, "status: error"):
- record.Warn(s.scope.HetznerBareMetalHost, "CloudInitFailed", "cloud init returned status error")
- return s.recordActionFailure(infrav1.FatalError, "cloud init returned status error"), "", nil
+ msg := fmt.Sprintf("cloud init returned status error: %s", out.String())
+ record.Warn(s.scope.HetznerBareMetalHost, "CloudInitFailed", msg)
+ return s.recordActionFailure(infrav1.FatalError, msg), msg
+
default:
- // Errors are handled after stdOut in this case, as status: error returns an exited with status 1 error
- if err := handleSSHError(out); err != nil {
- return actionError{err: fmt.Errorf("failed to get cloud init status: %w", err)}, "", nil
- }
- return actionContinue{delay: 5 * time.Second}, fmt.Sprintf("cloud-init unknown output: %s. %s", out.StdOut, out.StdErr), nil
+ err = fmt.Errorf("unknown cloud-init output: %s", out.String())
+ return actionError{err: err}, err.Error()
}
}
@@ -1748,7 +1889,7 @@ func (s *Service) handleCloudInitNotStarted(ctx context.Context) actionResult {
}
if trimLineBreak(out.StdOut) != "" {
- // it was not succesfull. Prepare and reboot again
+ // it was not successful. Prepare and reboot again
out = oldSSHClient.CleanCloudInitLogs()
if err := handleSSHError(out); err != nil {
return actionError{err: fmt.Errorf("failed to CleanCloudInitLogs: %w", err)}
@@ -1810,65 +1951,234 @@ func analyzeSSHOutputProvisioned(out sshclient.Output) (isTimeout, isConnectionR
// next: Stays in Provisioned (final state)
func (s *Service) actionProvisioned(ctx context.Context) actionResult {
// set host to provisioned
- conditions.MarkTrue(s.scope.HetznerBareMetalHost, infrav1.ProvisionSucceededCondition)
rebootDesired := s.scope.HetznerBareMetalHost.HasRebootAnnotation()
- isRebooted := s.scope.HetznerBareMetalHost.Spec.Status.Rebooted
- creds := sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.SecretRef)
- in := sshclient.Input{
- PrivateKey: creds.PrivateKey,
- Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterCloudInit,
- IP: s.scope.HetznerBareMetalHost.Spec.Status.GetIPAddress(),
+
+ host := s.scope.HetznerBareMetalHost
+
+ if !rebootDesired {
+ host.Spec.Status.Rebooted = false
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = ""
+ host.Spec.Status.ExternalIDs.RebootAnnotationSince.Time = time.Time{}
+ return actionComplete{} // Stays in Provisioned (final state)
}
- sshClient := s.scope.SSHClientFactory.NewClient(in)
- if rebootDesired {
- if isRebooted {
- // Reboot has been done already. Check whether it has been successful
- // Check hostname with sshClient
- out := sshClient.GetHostName()
+ if host.Spec.Status.ExternalIDs.RebootAnnotationSince.IsZero() {
+ host.Spec.Status.ExternalIDs.RebootAnnotationSince = metav1.Now()
+ }
- wantHostName := s.scope.Hostname()
+ rebootDuration := time.Since(host.Spec.Status.ExternalIDs.RebootAnnotationSince.Time)
+ if rebootDuration > 5*time.Minute {
+ msg := fmt.Sprintf("Rebooting timed out after: %s", rebootDuration.Round(time.Second))
+ s.scope.Logger.Info(msg)
+ conditions.MarkFalse(
+ s.scope.HetznerBareMetalHost,
+ infrav1.RebootSucceededCondition,
+ "TimedOut",
+ clusterv1.ConditionSeverityError,
+ "%s",
+ msg,
+ )
+ return s.recordActionFailure(infrav1.FatalError, msg)
+ }
- if trimLineBreak(out.StdOut) == wantHostName {
- // Reboot has been successful
- s.scope.HetznerBareMetalHost.Spec.Status.Rebooted = false
- s.scope.HetznerBareMetalHost.ClearRebootAnnotations()
+ wlClient, err := s.scope.WorkloadClusterClientFactory.NewWorkloadClient(ctx)
+ if err != nil {
+ err = fmt.Errorf("actionProvisioned (Reboot via Annotation), failed to get wlClient: %w", err)
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "GetWorkloadClusterClientFailed",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return actionError{err: err}
+ }
- s.scope.HetznerBareMetalHost.ClearError()
- return actionComplete{}
- }
- // Reboot has been ongoing
- isTimeout, isSSHConnectionRefusedError, err := analyzeSSHOutputProvisioned(out)
- if err != nil {
- if errors.Is(err, errUnexpectedHostName) {
- // One possible reason: The machine gets used by a second wl-cluster
- record.Warnf(s.scope.HetznerBareMetalHost, "UnexpectedHostName",
- "Provisioned: wanted %q. %s", wantHostName, err.Error())
- }
- return actionError{err: fmt.Errorf("failed to handle incomplete boot - provisioning: %w", err)}
+ // Get the capi-machine, so that we can get the Node-name in the wl-cluster.
+ machine, err := util.GetOwnerMachine(ctx, s.scope.Client, s.scope.HetznerBareMetalMachine.ObjectMeta)
+ if err != nil {
+ err = fmt.Errorf("actionProvisioned (Reboot via Annotation), GetOwnerMachine failed: %w",
+ err)
+ return actionError{err: err}
+ }
+
+ if machine.Status.NodeRef == nil {
+ // Very unlikely, but we want to avoid a panic.
+ err = errors.New("machine.Status.NodeRef is nil?")
+ return actionError{err: err}
+ }
+
+ nodeName := machine.Status.NodeRef.Name
+ node := &corev1.Node{}
+ err = wlClient.Get(ctx, client.ObjectKey{Name: nodeName}, node)
+ if err != nil {
+ err = fmt.Errorf("getting Node in wl-cluster failed: %w", err)
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "GettingNodeInWorkloadClusterFailed",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return actionError{err: err}
+ }
+
+ // get the current BootId from the wl-cluster. If it has changed, we know that the reboot was
+ // successful.
+ currentBootID := node.Status.NodeInfo.BootID
+ if currentBootID == "" {
+ err = errors.New("node.Status.NodeInfo.BootID is empty?")
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "NodeInWorkloadClusterHasEmptyBootID",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return actionError{err: err}
+ }
+
+ isRebooted := host.Spec.Status.Rebooted
+ if !isRebooted {
+ // Reboot now
+
+ // Set current BootID, so we can detect a successful reboot
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = currentBootID
+
+ if s.scope.SSHAfterInstallImage {
+ creds := sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, host.Spec.Status.SSHSpec.SecretRef)
+
+ in := sshclient.Input{
+ PrivateKey: creds.PrivateKey,
+ Port: host.Spec.Status.SSHSpec.PortAfterInstallImage,
+ IP: host.Spec.Status.GetIPAddress(),
}
- failed, err := s.handleIncompleteBoot(ctx, false, isTimeout, isSSHConnectionRefusedError)
- if failed {
- return s.recordActionFailure(infrav1.PermanentError, err.Error())
+
+ sshClient := s.scope.SSHClientFactory.NewClient(in)
+
+ out := sshClient.Reboot()
+ if err := handleSSHError(out); err != nil {
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "RebootViaSSHFailed",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return actionError{err: err}
}
- if err != nil {
- return actionError{err: fmt.Errorf(errMsgFailedHandlingIncompleteBoot, err)}
+ } else {
+ rebootType := infrav1.RebootTypeHardware
+ if _, err := s.scope.RobotClient.RebootBMServer(host.Spec.ServerID, rebootType); err != nil {
+ s.handleRobotRateLimitExceeded(err, rebootServerStr)
+
+ err = fmt.Errorf("actionProvisioned (Reboot via Annotation), reboot (%s) failed: %w", rebootType, err)
+
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "RebootBMServerViaAPIFailed",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return actionError{err: err}
}
- return actionContinue{delay: 10 * time.Second}
- }
- // Reboot now
- out := sshClient.Reboot()
- if err := handleSSHError(out); err != nil {
- return actionError{err: err}
}
- createSSHRebootEvent(ctx, s.scope.HetznerBareMetalHost, "Rebooting because annotation was set")
- s.scope.HetznerBareMetalHost.Spec.Status.Rebooted = true
+ msg := fmt.Sprintf("Rebooting because annotation was set. Old BootID: %s", currentBootID)
+
+ createSSHRebootEvent(ctx, host, msg)
+
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "RebootingMachine",
+ clusterv1.ConditionSeverityInfo, "%s",
+ msg)
+ host.Spec.Status.Rebooted = true
+ return actionContinue{delay: 10 * time.Second}
+ }
+
+ // Reboot has already been performed. Now, verify its success by connecting to the wl-cluster
+ // and checking the BootID. If the BootID has changed, the reboot was successful.
+
+ if host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID != currentBootID {
+ // Reboot has been successful
+ s.scope.Logger.Info(fmt.Sprintf("BootID changed: %q -> %q", host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID, currentBootID))
+ host.Spec.Status.Rebooted = false
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = ""
+ host.Spec.Status.ExternalIDs.RebootAnnotationSince.Time = time.Time{}
+
+ conditions.MarkTrue(host, infrav1.RebootSucceededCondition)
+
+ host.ClearRebootAnnotations()
+ host.ClearError()
+
+ return actionComplete{}
+ }
+
+ if !s.scope.SSHAfterInstallImage {
+ // s.scope.SSHAfterInstallImage is false: No ssh allowed.
+ // We can only wait for the BootID in the wl-cluster to change.
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "WaitingForNodeToBeRebooted",
+ clusterv1.ConditionSeverityInfo,
+ "Waiting for BootID of Node (in wl-cluster) to change (%s)",
+ time.Since(host.Spec.Status.ExternalIDs.RebootAnnotationSince.Time).Round(time.Second))
return actionContinue{delay: 10 * time.Second}
}
- return actionComplete{} // Stays in Provisioned (final state)
+ creds := sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, host.Spec.Status.SSHSpec.SecretRef)
+ in := sshclient.Input{
+ PrivateKey: creds.PrivateKey,
+ Port: host.Spec.Status.SSHSpec.PortAfterInstallImage,
+ IP: host.Spec.Status.GetIPAddress(),
+ }
+ sshClient := s.scope.SSHClientFactory.NewClient(in)
+
+ // Check hostname with sshClient
+ out := sshClient.GetHostName()
+
+ wantHostName := s.scope.Hostname()
+
+ if trimLineBreak(out.StdOut) == wantHostName {
+ // Reboot has been successful
+ host.Spec.Status.Rebooted = false
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = ""
+ host.Spec.Status.ExternalIDs.RebootAnnotationSince.Time = time.Time{}
+
+ conditions.MarkTrue(host, infrav1.RebootSucceededCondition)
+
+ host.ClearRebootAnnotations()
+ host.ClearError()
+
+ return actionComplete{}
+ }
+ // Reboot has been ongoing
+ isTimeout, isSSHConnectionRefusedError, err := analyzeSSHOutputProvisioned(out)
+ if err != nil {
+ if errors.Is(err, errUnexpectedHostName) {
+ // One possible reason: The machine gets used by a second wl-cluster
+ record.Warnf(host, "UnexpectedHostName",
+ "Provisioned: wanted %q. %s", wantHostName, err.Error())
+ }
+ err = fmt.Errorf("failed to handle incomplete boot - actionProvisioned: %w", err)
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "FailureFailGettingHostnameViaSSH",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return actionError{err: err}
+ }
+
+ failed, err := s.handleIncompleteBoot(ctx, false, isTimeout, isSSHConnectionRefusedError)
+ if failed {
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "RebootFailed",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return s.recordActionFailure(infrav1.PermanentError, err.Error())
+ }
+ if err != nil {
+ err = fmt.Errorf(errMsgFailedHandlingIncompleteBoot, err)
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "RebootFailed",
+ clusterv1.ConditionSeverityWarning, "%s",
+ err.Error())
+ return actionError{err: err}
+ }
+
+ conditions.MarkFalse(host, infrav1.RebootSucceededCondition,
+ "WaitingForNodeToBeRebooted",
+ clusterv1.ConditionSeverityInfo,
+ "Waiting for BootID of Node (in wl-cluster) to change (%s)",
+ time.Since(host.Spec.Status.ExternalIDs.RebootAnnotationSince.Time).Round(time.Second))
+
+ return actionContinue{delay: 10 * time.Second}
}
// next: None
@@ -1882,22 +2192,24 @@ func (s *Service) actionDeprovisioning(_ context.Context) actionResult {
return actionError{err: fmt.Errorf("failed to update name of host in robot API: %w", err)}
}
- // If has been provisioned completely, stop all running pods
- if s.scope.OSSSHSecret != nil {
- sshClient := s.scope.SSHClientFactory.NewClient(sshclient.Input{
- PrivateKey: sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.SecretRef).PrivateKey,
- Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterCloudInit,
- IP: s.scope.HetznerBareMetalHost.Spec.Status.GetIPAddress(),
- })
- out := sshClient.ResetKubeadm()
- s.scope.V(1).Info("Output of ResetKubeadm", "stdout", out.StdOut, "stderr", out.StdErr, "err", out.Err)
- if out.Err != nil {
- record.Warnf(s.scope.HetznerBareMetalHost, "FailedResetKubeAdm", "failed to reset kubeadm: %s", out.Err.Error())
+ if s.scope.SSHAfterInstallImage {
+ // If has been provisioned completely, stop all running pods
+ if s.scope.OSSSHSecret != nil {
+ sshClient := s.scope.SSHClientFactory.NewClient(sshclient.Input{
+ PrivateKey: sshclient.CredentialsFromSecret(s.scope.OSSSHSecret, s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.SecretRef).PrivateKey,
+ Port: s.scope.HetznerBareMetalHost.Spec.Status.SSHSpec.PortAfterInstallImage,
+ IP: s.scope.HetznerBareMetalHost.Spec.Status.GetIPAddress(),
+ })
+ out := sshClient.ResetKubeadm()
+ s.scope.V(1).Info("Output of ResetKubeadm", "stdout", out.StdOut, "stderr", out.StdErr, "err", out.Err)
+ if out.Err != nil {
+ record.Warnf(s.scope.HetznerBareMetalHost, "FailedResetKubeAdm", "failed to reset kubeadm: %s", out.Err.Error())
+ } else {
+ record.Event(s.scope.HetznerBareMetalHost, "SuccessfulResetKubeAdm", "Reset was successful.")
+ }
} else {
- record.Event(s.scope.HetznerBareMetalHost, "SuccessfulResetKubeAdm", "Reset was successful.")
+ s.scope.Info("OS SSH Secret is empty - cannot reset kubeadm")
}
- } else {
- s.scope.Info("OS SSH Secret is empty - cannot reset kubeadm")
}
// Only keep permanent errors on the host object after deprovisioning.
diff --git a/pkg/services/baremetal/host/host_suite_test.go b/pkg/services/baremetal/host/host_suite_test.go
index d80b42da0..1addc170e 100644
--- a/pkg/services/baremetal/host/host_suite_test.go
+++ b/pkg/services/baremetal/host/host_suite_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package host
import (
+ "context"
"fmt"
"testing"
@@ -28,10 +29,12 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog/v2/textlogger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
"github.com/syself/cluster-api-provider-hetzner/pkg/scope"
+ secretutil "github.com/syself/cluster-api-provider-hetzner/pkg/secrets"
robotclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/robot"
sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
"github.com/syself/cluster-api-provider-hetzner/test/helpers"
@@ -70,6 +73,8 @@ func newTestHostStateMachine(host *infrav1.HetznerBareMetalHost, service *Servic
return newHostStateMachine(host, service, log)
}
+var fakeBootID = "1234321"
+
func newTestService(
host *infrav1.HetznerBareMetalHost,
robotClient robotclient.Client,
@@ -79,21 +84,96 @@ func newTestService(
) *Service {
scheme := runtime.NewScheme()
utilruntime.Must(infrav1.AddToScheme(scheme))
+ utilruntime.Must(corev1.AddToScheme(scheme))
+ utilruntime.Must(clusterv1.AddToScheme(scheme))
c := fakeclient.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(host).Build()
+ ctx := context.Background()
+
+ capiMachine := &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: host.Name,
+ Namespace: host.Namespace,
+ },
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Machine",
+ APIVersion: clusterv1.GroupVersion.String(),
+ },
+ Status: clusterv1.MachineStatus{
+ NodeRef: &corev1.ObjectReference{
+ Kind: "Node",
+ Name: host.Name,
+ APIVersion: "v1",
+ },
+ },
+ }
+ err := c.Create(ctx, capiMachine)
+ if err != nil {
+ panic(err)
+ }
+
+ node := &corev1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: host.Name,
+ },
+ Status: corev1.NodeStatus{
+ NodeInfo: corev1.NodeSystemInfo{
+ BootID: fakeBootID,
+ },
+ },
+ }
+ err = c.Create(ctx, node)
+ if err != nil {
+ panic(err)
+ }
+
+ hbmm := &infrav1.HetznerBareMetalMachine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: host.Name,
+ Namespace: host.Namespace,
+ },
+ }
+
+ hbmm.OwnerReferences = append(hbmm.OwnerReferences, metav1.OwnerReference{
+ APIVersion: capiMachine.APIVersion,
+ Kind: capiMachine.Kind,
+ Name: capiMachine.Name,
+ UID: capiMachine.UID,
+ })
+
+ err = c.Create(ctx, hbmm)
+ if err != nil {
+ panic(err)
+ }
+
return &Service{
&scope.BareMetalHostScope{
- Logger: log,
- Client: c,
- SSHClientFactory: sshClientFactory,
- RobotClient: robotClient,
- HetznerBareMetalHost: host,
+ Logger: log,
+ Client: c,
+ SecretManager: secretutil.NewSecretManager(log, c, c),
+ SSHClientFactory: sshClientFactory,
+ RobotClient: robotClient,
+ HetznerBareMetalHost: host,
+ HetznerBareMetalMachine: hbmm,
HetznerCluster: &infrav1.HetznerCluster{
Spec: helpers.GetDefaultHetznerClusterSpec(),
},
// Attention: this doesn't make sense if we test with constant node names
- Cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}},
- OSSSHSecret: osSSHSecret,
- RescueSSHSecret: rescueSSHSecret,
+ Cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}},
+ OSSSHSecret: osSSHSecret,
+ RescueSSHSecret: rescueSSHSecret,
+ SSHAfterInstallImage: true,
+ WorkloadClusterClientFactory: &fakeWorkloadClusterClientFactory{
+ client: c,
+ },
+ ImageURLCommand: "image-url-command",
},
}
}
+
+type fakeWorkloadClusterClientFactory struct {
+ client client.Client
+}
+
+func (f *fakeWorkloadClusterClientFactory) NewWorkloadClient(_ context.Context) (client.Client, error) {
+ return f.client, nil
+}
diff --git a/pkg/services/baremetal/host/host_test.go b/pkg/services/baremetal/host/host_test.go
index 0303a7fcf..98e5ada94 100644
--- a/pkg/services/baremetal/host/host_test.go
+++ b/pkg/services/baremetal/host/host_test.go
@@ -19,14 +19,18 @@ package host
import (
"context"
"fmt"
+ "syscall"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/mock"
"github.com/syself/hrobot-go/models"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
+ "sigs.k8s.io/cluster-api/util/conditions"
+ "sigs.k8s.io/controller-runtime/pkg/client"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
bmmock "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/mocks"
@@ -96,6 +100,152 @@ var _ = Describe("SetErrorMessage", func() {
)
})
+var _ = Describe("actionImageInstalling (image-url-command)", func() {
+ ctx := context.Background()
+
+ newBaseHost := func() *infrav1.HetznerBareMetalHost {
+ host := helpers.BareMetalHost(
+ "test-host",
+ "default",
+ helpers.WithIPv4(),
+ helpers.WithConsumerRef(),
+ helpers.WithSSHStatus(),
+ )
+ // Set install image with custom image-url-command mode
+ host.Spec.Status.InstallImage = &infrav1.InstallImage{
+ Image: infrav1.Image{
+ URL: "https://example.com/foo/image",
+ UseCustomImageURLCommand: true,
+ },
+ }
+ // Ensure LastUpdated is now by default
+ t := metav1.Now()
+ host.Spec.Status.LastUpdated = &t
+ return host
+ }
+
+ It("returns continue when command is running", func() {
+ host := newBaseHost()
+ sshMock := &sshmock.Client{}
+ sshMock.On("GetHostName").Return(sshclient.Output{StdOut: "rescue"})
+ sshMock.On("StateOfImageURLCommand").Return(sshclient.ImageURLCommandStateRunning, "", nil)
+
+ svc := newTestService(host, nil, bmmock.NewSSHFactory(sshMock, sshMock, sshMock), nil, helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+
+ res := svc.actionImageInstalling(ctx)
+ Expect(res).To(BeAssignableToTypeOf(actionContinue{}))
+ c := conditions.Get(host, infrav1.ProvisionSucceededCondition)
+ Expect(c.Message).To(Equal(`host (test-host) is still provisioning - state "image-installing"`))
+ })
+
+ It("reboots and completes when command finished successfully", func() {
+ host := newBaseHost()
+ sshMock := &sshmock.Client{}
+ sshMock.On("GetHostName").Return(sshclient.Output{StdOut: "rescue"})
+ sshMock.On("StateOfImageURLCommand").Return(sshclient.ImageURLCommandStateFinishedSuccessfully, "LOGFILE-CONTENT", nil)
+ sshMock.On("Reboot").Return(sshclient.Output{})
+
+ robot := robotmock.Client{}
+ robot.On("SetBMServerName", mock.Anything, infrav1.BareMetalHostNamePrefix+host.Spec.ConsumerRef.Name).Return(nil, nil)
+
+ svc := newTestService(host, &robot, bmmock.NewSSHFactory(sshMock, sshMock, sshMock), nil, helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+
+ res := svc.actionImageInstalling(ctx)
+ Expect(res).To(BeAssignableToTypeOf(actionComplete{}))
+ Expect(sshMock.AssertCalled(GinkgoT(), "Reboot")).To(BeTrue())
+ Expect(robot.AssertCalled(GinkgoT(), "SetBMServerName", mock.Anything, infrav1.BareMetalHostNamePrefix+host.Spec.ConsumerRef.Name)).To(BeTrue())
+ // error should be cleared
+ Expect(host.Spec.Status.ErrorMessage).To(Equal(""))
+ c := conditions.Get(host, infrav1.ProvisionSucceededCondition)
+ Expect(c.Message).To(Equal(`host (test-host) is still provisioning - state "image-installing"`))
+ })
+
+ It("returns error when command failed", func() {
+ host := newBaseHost()
+ sshMock := &sshmock.Client{}
+ sshMock.On("GetHostName").Return(sshclient.Output{StdOut: "rescue"})
+ sshMock.On("StateOfImageURLCommand").Return(sshclient.ImageURLCommandStateFailed, "some logs", nil)
+
+ svc := newTestService(host, nil, bmmock.NewSSHFactory(sshMock, sshMock, sshMock), nil, helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+ res := svc.actionImageInstalling(ctx)
+ Expect(res).To(BeAssignableToTypeOf(actionFailed{}))
+ c := conditions.Get(host, infrav1.ProvisionSucceededCondition)
+ Expect(c.Message).To(ContainSubstring("image-url-command failed"))
+ })
+
+ It("starts the command on NotStarted and continues", func() {
+ host := newBaseHost()
+ // set UserData secret ref and create the secret the scope's SecretManager will fetch
+ host.Spec.Status.UserData = &corev1.SecretReference{ // bootstrap secret ref
+ Name: "bootstrap-secret",
+ Namespace: host.Namespace,
+ }
+
+ // Build service with fake client containing the bootstrap secret
+ sshMock := &sshmock.Client{}
+ sshMock.On("GetHostName").Return(sshclient.Output{StdOut: "rescue"})
+ sshMock.On("StateOfImageURLCommand").Return(sshclient.ImageURLCommandStateNotStarted, "", nil)
+
+ svc := newTestService(host, nil, bmmock.NewSSHFactory(sshMock, sshMock, sshMock), nil, helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+ sshMock.On("StartImageURLCommand", mock.Anything, "image-url-command", host.Spec.Status.InstallImage.Image.URL, mock.Anything, svc.scope.Hostname(), []string{"nvme1n1"}).Return(0, "", nil)
+ // Create bootstrap secret in fake client with key 'value'
+ secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: host.Spec.Status.UserData.Name, Namespace: host.Spec.Status.UserData.Namespace}, Data: map[string][]byte{"value": []byte("#cloud-config")}}
+ Expect(svc.scope.Client.Create(ctx, secret)).To(Succeed())
+
+ sshMock.On("GetHardwareDetailsStorage").Return(sshclient.Output{StdOut: `NAME="nvme1n1" TYPE="disk" HCTL="" MODEL="SAMSUNG MZVLB512HAJQ-00000" VENDOR="" SERIAL="S3W8NX0N811178" SIZE="512110190592" WWN="eui.0025388801b4dff2" ROTA="0"`})
+ svc.scope.HetznerBareMetalHost.Spec.RootDeviceHints = &infrav1.RootDeviceHints{
+ WWN: "eui.0025388801b4dff2",
+ }
+
+ res := svc.actionImageInstalling(ctx)
+ Expect(res).To(BeAssignableToTypeOf(actionContinue{}))
+ Expect(sshMock.AssertCalled(GinkgoT(), "StartImageURLCommand", mock.Anything, "image-url-command", host.Spec.Status.InstallImage.Image.URL, mock.Anything, svc.scope.Hostname(), []string{"nvme1n1"})).To(BeTrue())
+ c := conditions.Get(host, infrav1.ProvisionSucceededCondition)
+ Expect(c.Message).To(ContainSubstring(`baremetal-image-url-command started`))
+ })
+
+ It("records failure when StartImageURLCommand returns non-zero exit", func() {
+ host := newBaseHost()
+ host.Spec.Status.UserData = &corev1.SecretReference{Name: "bootstrap-secret", Namespace: host.Namespace}
+
+ sshMock := &sshmock.Client{}
+ sshMock.On("GetHostName").Return(sshclient.Output{StdOut: "rescue"})
+ sshMock.On("StateOfImageURLCommand").Return(sshclient.ImageURLCommandStateNotStarted, "", nil)
+
+ svc := newTestService(host, nil, bmmock.NewSSHFactory(sshMock, sshMock, sshMock), nil, helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+ sshMock.On("StartImageURLCommand", mock.Anything, "image-url-command", host.Spec.Status.InstallImage.Image.URL, mock.Anything, svc.scope.Hostname(), []string{"nvme1n1"}).Return(7, "boom", nil)
+
+ secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: host.Spec.Status.UserData.Name, Namespace: host.Spec.Status.UserData.Namespace}, Data: map[string][]byte{"value": []byte("#cloud-config")}}
+ Expect(svc.scope.Client.Create(ctx, secret)).To(Succeed())
+
+ sshMock.On("GetHardwareDetailsStorage").Return(sshclient.Output{StdOut: `NAME="nvme1n1" TYPE="disk" HCTL="" MODEL="SAMSUNG MZVLB512HAJQ-00000" VENDOR="" SERIAL="S3W8NX0N811178" SIZE="512110190592" WWN="eui.0025388801b4dff2" ROTA="0"`})
+ svc.scope.HetznerBareMetalHost.Spec.RootDeviceHints = &infrav1.RootDeviceHints{
+ WWN: "eui.0025388801b4dff2",
+ }
+ res := svc.actionImageInstalling(ctx)
+ Expect(res).To(BeAssignableToTypeOf(actionFailed{}))
+ c := conditions.Get(host, infrav1.ProvisionSucceededCondition)
+ Expect(c.Message).To(ContainSubstring("StartImageURLCommand failed with non-zero exit status. Deleting machine"))
+ })
+
+ It("times out after 7 minutes", func() {
+ host := newBaseHost()
+ sevenPlus := metav1.NewTime(time.Now().Add(-8 * time.Minute))
+ host.Spec.Status.LastUpdated = &sevenPlus
+
+ sshMock := &sshmock.Client{}
+ sshMock.On("GetHostName").Return(sshclient.Output{StdOut: "rescue"})
+ sshMock.On("StateOfImageURLCommand").Return(sshclient.ImageURLCommandStateRunning, "", nil)
+
+ svc := newTestService(host, nil, bmmock.NewSSHFactory(sshMock, sshMock, sshMock), nil, helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+
+ res := svc.actionImageInstalling(ctx)
+ Expect(res).To(BeAssignableToTypeOf(actionFailed{}))
+ c := conditions.Get(host, infrav1.ProvisionSucceededCondition)
+ Expect(c.Message).To(ContainSubstring("ImageURLCommand timed out"))
+ })
+})
+
var _ = Describe("test validateRootDeviceWwnsAreSubsetOfExistingWwns", func() {
It("should return error when storageDevices is empty", func() {
rootDeviceHints := &infrav1.RootDeviceHints{WWN: "wwn1"}
@@ -773,14 +923,14 @@ var _ = Describe("analyzeSSHOutputInstallImage", func() {
expectedErrMessage: "wrong ssh key",
}),
Entry("connectionRefused error, rescue active", testCaseAnalyzeSSHOutputInstallImageOutErr{
- err: sshclient.ErrConnectionRefused,
+ err: syscall.ECONNREFUSED,
rescueActive: true,
expectedIsTimeout: false,
expectedIsConnectionRefused: true,
expectedErrMessage: "",
}),
Entry("connectionRefused error, rescue not active", testCaseAnalyzeSSHOutputInstallImageOutErr{
- err: sshclient.ErrConnectionRefused,
+ err: syscall.ECONNREFUSED,
rescueActive: false,
expectedIsTimeout: false,
expectedIsConnectionRefused: true,
@@ -917,7 +1067,7 @@ var _ = Describe("analyzeSSHOutputInstallImage", func() {
expectedErrMessage: "wrong ssh key",
}),
Entry("connectionRefused error, port 22", testCaseAnalyzeSSHOutputInstallImageOutErr{
- err: sshclient.ErrConnectionRefused,
+ err: syscall.ECONNREFUSED,
errFromGetHostNameNil: true,
port: 22,
expectedIsTimeout: false,
@@ -925,7 +1075,7 @@ var _ = Describe("analyzeSSHOutputInstallImage", func() {
expectedErrMessage: "",
}),
Entry("connectionRefused error, port != 22, hostname error", testCaseAnalyzeSSHOutputInstallImageOutErr{
- err: sshclient.ErrConnectionRefused,
+ err: syscall.ECONNREFUSED,
errFromGetHostNameNil: false,
port: 23,
expectedIsTimeout: false,
@@ -933,7 +1083,7 @@ var _ = Describe("analyzeSSHOutputInstallImage", func() {
expectedErrMessage: "",
}),
Entry("connectionRefused error, port != 22, no hostname error", testCaseAnalyzeSSHOutputInstallImageOutErr{
- err: sshclient.ErrConnectionRefused,
+ err: syscall.ECONNREFUSED,
errFromGetHostNameNil: true,
port: 23,
expectedIsTimeout: false,
@@ -1108,7 +1258,7 @@ var _ = Describe("analyzeSSHOutputProvisioned", func() {
expectedErrMessage: ptr.To("wrong ssh key"),
}),
Entry("connection refused", testCaseAnalyzeSSHOutputProvisioned{
- out: sshclient.Output{Err: sshclient.ErrConnectionRefused},
+ out: sshclient.Output{Err: syscall.ECONNREFUSED},
expectedIsTimeout: false,
expectedIsConnectionRefused: true,
expectedErrMessage: nil,
@@ -1247,7 +1397,7 @@ var _ = Describe("actionRegistering", func() {
expectedErrorType: infrav1.ErrorTypeSSHRebootTriggered,
}),
Entry("connectionRefused", testCaseActionRegisteringIncompleteBoot{
- getHostNameOutput: sshclient.Output{Err: sshclient.ErrConnectionRefused},
+ getHostNameOutput: sshclient.Output{Err: syscall.ECONNREFUSED},
expectedErrorType: infrav1.ErrorTypeConnectionError,
}),
)
@@ -1262,7 +1412,7 @@ func registeringSSHMock(storageStdOut string) *sshmock.Client {
})
sshMock.On("GetHardwareDetailsNics").Return(sshclient.Output{
StdOut: `name="eth0" model="Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15)" mac="a8:a1:59:94:19:42" ipv4="23.88.6.239/26" speedMbps="1000"
-name="eth0" model="Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15)" mac="a8:a1:59:94:19:42" ipv6="2a01:4f8:272:3e0f::2/64" speedMbps="1000"`,
+name="eth0" model="Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15)" mac="a8:a1:59:94:19:42" ip="2a01:4f8:272:3e0f::2/64" speedMbps="1000"`,
})
sshMock.On("GetHardwareDetailsCPUArch").Return(sshclient.Output{StdOut: "myarch"})
sshMock.On("GetHardwareDetailsCPUModel").Return(sshclient.Output{StdOut: "mymodel"})
@@ -1383,7 +1533,6 @@ var _ = Describe("actionEnsureProvisioned", func() {
outSSHClientCheckSigterm sshclient.Output
outOldSSHClientCloudInitStatus sshclient.Output
outOldSSHClientCheckSigterm sshclient.Output
- samePorts bool
expectedActionResult actionResult
expectedErrorType infrav1.ErrorType
expectsSSHClientCallCloudInitStatus bool
@@ -1397,18 +1546,12 @@ var _ = Describe("actionEnsureProvisioned", func() {
DescribeTable("actionEnsureProvisioned",
func(in testCaseActionEnsureProvisioned) {
ctx := context.Background()
- var (
- portAfterCloudInit = 24
- portAfterInstallImage = 23
- )
- if in.samePorts {
- portAfterInstallImage = 24
- }
+ portAfterInstallImage := 24
host := helpers.BareMetalHost(
"test-host",
"default",
- helpers.WithSSHSpecInclPorts(portAfterInstallImage, portAfterCloudInit),
+ helpers.WithSSHSpecInclPorts(portAfterInstallImage),
helpers.WithIPv4(),
helpers.WithConsumerRef(),
)
@@ -1477,7 +1620,6 @@ var _ = Describe("actionEnsureProvisioned", func() {
outSSHClientCheckSigterm: sshclient.Output{},
outOldSSHClientCloudInitStatus: sshclient.Output{},
outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: true,
expectedActionResult: actionContinue{},
expectedErrorType: infrav1.ErrorType(""),
expectsSSHClientCallCloudInitStatus: true,
@@ -1495,7 +1637,6 @@ var _ = Describe("actionEnsureProvisioned", func() {
outSSHClientCheckSigterm: sshclient.Output{StdOut: ""},
outOldSSHClientCloudInitStatus: sshclient.Output{},
outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: true,
expectedActionResult: actionComplete{},
expectedErrorType: infrav1.ErrorType(""),
expectsSSHClientCallCloudInitStatus: true,
@@ -1513,7 +1654,6 @@ var _ = Describe("actionEnsureProvisioned", func() {
outSSHClientCheckSigterm: sshclient.Output{StdOut: "found SIGTERM in cloud init output logs"},
outOldSSHClientCloudInitStatus: sshclient.Output{},
outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: true,
expectedActionResult: actionContinue{},
expectedErrorType: infrav1.ErrorType(""),
expectsSSHClientCallCloudInitStatus: true,
@@ -1531,7 +1671,6 @@ var _ = Describe("actionEnsureProvisioned", func() {
outSSHClientCheckSigterm: sshclient.Output{},
outOldSSHClientCloudInitStatus: sshclient.Output{},
outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: true,
expectedActionResult: actionFailed{},
expectedErrorType: infrav1.FatalError,
expectsSSHClientCallCloudInitStatus: true,
@@ -1549,7 +1688,6 @@ var _ = Describe("actionEnsureProvisioned", func() {
outSSHClientCheckSigterm: sshclient.Output{},
outOldSSHClientCloudInitStatus: sshclient.Output{},
outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: true,
expectedActionResult: actionContinue{},
expectedErrorType: infrav1.ErrorType(""),
expectsSSHClientCallCloudInitStatus: true,
@@ -1562,179 +1700,16 @@ var _ = Describe("actionEnsureProvisioned", func() {
),
Entry("connectionFailed, same ports",
testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
+ outSSHClientGetHostName: sshclient.Output{Err: syscall.ECONNREFUSED},
outSSHClientCloudInitStatus: sshclient.Output{},
outSSHClientCheckSigterm: sshclient.Output{},
outOldSSHClientCloudInitStatus: sshclient.Output{},
outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: true,
- expectedActionResult: actionContinue{},
- expectedErrorType: infrav1.ErrorTypeConnectionError,
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: false,
- expectsOldSSHClientCallCheckSigterm: false,
- expectsOldSSHClientCallReboot: false,
- },
- ),
- Entry("connectionFailed, different ports, connectionFailed of oldSSHClient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: false,
- expectedActionResult: actionContinue{},
- expectedErrorType: infrav1.ErrorTypeConnectionError,
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: true,
- expectsOldSSHClientCallCheckSigterm: false,
- expectsOldSSHClientCallReboot: false,
- },
- ),
- Entry("connectionFailed, different ports, status running of oldSSHClient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{StdOut: "status: running"},
- outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: false,
- expectedActionResult: actionContinue{},
- expectedErrorType: infrav1.ErrorType(""),
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: true,
- expectsOldSSHClientCallCheckSigterm: false,
- expectsOldSSHClientCallReboot: false,
- },
- ),
- Entry("connectionFailed, different ports, status error of oldSSHClient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{StdOut: "status: error"},
- outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: false,
- expectedActionResult: actionFailed{},
- expectedErrorType: infrav1.FatalError,
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: true,
- expectsOldSSHClientCallCheckSigterm: false,
- expectsOldSSHClientCallReboot: false,
- },
- ),
- Entry("connectionFailed, different ports, status disabled of oldSSHClient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{StdOut: "status: disabled"},
- outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: false,
- expectedActionResult: actionContinue{},
- expectedErrorType: infrav1.ErrorType(""),
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: true,
- expectsOldSSHClientCallCheckSigterm: false,
- expectsOldSSHClientCallReboot: true,
- },
- ),
- Entry("connectionFailed, different ports, status done of oldSSHClient, SIGTERM of oldSSHClient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{StdOut: "status: done"},
- outOldSSHClientCheckSigterm: sshclient.Output{StdOut: "found SIGTERM in cloud init output logs"},
- samePorts: false,
- expectedActionResult: actionContinue{},
- expectedErrorType: infrav1.ErrorType(""),
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: true,
- expectsOldSSHClientCallCheckSigterm: true,
- expectsOldSSHClientCallReboot: true,
- },
- ),
- Entry("connectionFailed, different ports, status done of oldSSHClient, no SIGTERM of oldSSHClient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{StdOut: "status: done"},
- outOldSSHClientCheckSigterm: sshclient.Output{StdOut: ""},
- samePorts: false,
- expectedActionResult: actionContinue{},
- expectedErrorType: infrav1.ErrorType(""),
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: true,
- expectsOldSSHClientCallCheckSigterm: true,
- expectsOldSSHClientCallReboot: false,
- },
- ),
- Entry("connectionFailed, different ports, timeout of oldSSHClient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: sshclient.ErrConnectionRefused},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{Err: timeout},
- outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: false,
expectedActionResult: actionContinue{},
expectedErrorType: infrav1.ErrorTypeConnectionError,
expectsSSHClientCallCloudInitStatus: false,
expectsSSHClientCallCheckSigterm: false,
expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: true,
- expectsOldSSHClientCallCheckSigterm: false,
- expectsOldSSHClientCallReboot: false,
- },
- ),
- Entry("correct hostname, cloud init done, no SIGTERM, ports different",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{StdOut: infrav1.BareMetalHostNamePrefix + "bm-machine"},
- outSSHClientCloudInitStatus: sshclient.Output{StdOut: "status: done"},
- outSSHClientCheckSigterm: sshclient.Output{StdOut: ""},
- outOldSSHClientCloudInitStatus: sshclient.Output{},
- outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: false,
- expectedActionResult: actionComplete{},
- expectedErrorType: infrav1.ErrorType(""),
- expectsSSHClientCallCloudInitStatus: true,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
- expectsOldSSHClientCallCloudInitStatus: false,
- expectsOldSSHClientCallCheckSigterm: false,
- expectsOldSSHClientCallReboot: false,
- },
- ),
- Entry("timeout of sshclient",
- testCaseActionEnsureProvisioned{
- outSSHClientGetHostName: sshclient.Output{Err: timeout},
- outSSHClientCloudInitStatus: sshclient.Output{},
- outSSHClientCheckSigterm: sshclient.Output{},
- outOldSSHClientCloudInitStatus: sshclient.Output{},
- outOldSSHClientCheckSigterm: sshclient.Output{},
- samePorts: false,
- expectedActionResult: actionContinue{},
- expectedErrorType: infrav1.ErrorTypeSSHRebootTriggered,
- expectsSSHClientCallCloudInitStatus: false,
- expectsSSHClientCallCheckSigterm: false,
- expectsSSHClientCallReboot: false,
expectsOldSSHClientCallCloudInitStatus: false,
expectsOldSSHClientCallCheckSigterm: false,
expectsOldSSHClientCallReboot: false,
@@ -1743,7 +1718,7 @@ var _ = Describe("actionEnsureProvisioned", func() {
)
})
-var _ = Describe("actionProvisioned", func() {
+var _ = Describe("actionProvisioned SSHAfterInstallImage=true", func() {
type testCaseActionProvisioned struct {
shouldHaveRebootAnnotation bool
rebooted bool
@@ -1759,13 +1734,14 @@ var _ = Describe("actionProvisioned", func() {
host := helpers.BareMetalHost(
"test-host",
"default",
- helpers.WithSSHSpecInclPorts(23, 24),
+ helpers.WithSSHSpecInclPorts(23),
helpers.WithIPv4(),
helpers.WithConsumerRef(),
)
if tc.shouldHaveRebootAnnotation {
host.SetAnnotations(map[string]string{infrav1.RebootAnnotation: "reboot"})
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = fakeBootID
}
host.Spec.Status.Rebooted = tc.rebooted
@@ -1827,3 +1803,99 @@ var _ = Describe("actionProvisioned", func() {
}),
)
})
+
+var _ = Describe("actionProvisioned SSHAfterInstallImage=false", func() {
+ It("test reboot annotation for SSHAfterInstallImage=false, Reboot should be triggered", func() {
+ ctx := context.Background()
+ host := helpers.BareMetalHost(
+ "test-host",
+ "default",
+ helpers.WithSSHSpecInclPorts(23),
+ helpers.WithIPv4(),
+ helpers.WithConsumerRef(),
+ )
+
+ host.SetAnnotations(map[string]string{infrav1.RebootAnnotation: "reboot"})
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = fakeBootID
+
+ host.Spec.Status.Rebooted = false
+
+ robotMock := robotmock.Client{}
+ robotMock.On("RebootBMServer", mock.Anything, mock.Anything).Return(nil, nil).Once()
+
+ service := newTestService(host, &robotMock, nil, helpers.GetDefaultSSHSecret(osSSHKeyName, "default"), helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+ Expect(service.scope.RobotClient).ToNot(BeNil())
+ service.scope.SSHAfterInstallImage = false
+
+ actResult := service.actionProvisioned(ctx)
+ Expect(actResult).Should(BeAssignableToTypeOf(actionContinue{}))
+ Expect(robotMock.AssertNumberOfCalls(GinkgoT(), "RebootBMServer", 1)).To(BeTrue())
+ c := conditions.Get(host, infrav1.RebootSucceededCondition)
+ Expect(c.Message).To(ContainSubstring("Rebooting because annotation was set"))
+ Expect(host.Spec.Status.Rebooted).To(BeTrue())
+ })
+
+ It("test reboot annotation for SSHAfterInstallImage=false, reach: Waiting for BootID of Node", func() {
+ ctx := context.Background()
+ host := helpers.BareMetalHost(
+ "test-host",
+ "default",
+ helpers.WithSSHSpecInclPorts(23),
+ helpers.WithIPv4(),
+ helpers.WithConsumerRef(),
+ )
+
+ host.SetAnnotations(map[string]string{infrav1.RebootAnnotation: "reboot"})
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = fakeBootID
+ host.Spec.Status.Rebooted = true
+
+ service := newTestService(host, nil, nil, helpers.GetDefaultSSHSecret(osSSHKeyName, "default"), helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+ service.scope.SSHAfterInstallImage = false
+
+ actResult := service.actionProvisioned(ctx)
+ Expect(actResult).Should(BeAssignableToTypeOf(actionContinue{}))
+ c := conditions.Get(host, infrav1.RebootSucceededCondition)
+ Expect(c.Message).To(ContainSubstring("Waiting for BootID of Node (in wl-cluster) to change"))
+ })
+
+ It("test reboot annotation for SSHAfterInstallImage=false, finished with healthy Condition", func() {
+ // Change BootID
+ ctx := context.Background()
+ host := helpers.BareMetalHost(
+ "test-host",
+ "default",
+ helpers.WithSSHSpecInclPorts(23),
+ helpers.WithIPv4(),
+ helpers.WithConsumerRef(),
+ )
+
+ host.SetAnnotations(map[string]string{infrav1.RebootAnnotation: "reboot"})
+ host.Spec.Status.ExternalIDs.RebootAnnotationNodeBootID = fakeBootID
+ host.Spec.Status.Rebooted = true
+
+ node := &corev1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: host.Name,
+ },
+ }
+
+ service := newTestService(host, nil, nil, helpers.GetDefaultSSHSecret(osSSHKeyName, "default"), helpers.GetDefaultSSHSecret(rescueSSHKeyName, "default"))
+
+ err := service.scope.Client.Get(ctx, client.ObjectKeyFromObject(node), node)
+ Expect(err).ToNot(HaveOccurred())
+
+ node.Status.NodeInfo.BootID = "98765"
+ err = service.scope.Client.Status().Update(ctx, node)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Call actionProvisioned
+ actResult := service.actionProvisioned(ctx)
+ Expect(actResult).Should(BeAssignableToTypeOf(actionComplete{}))
+
+ // Condition should be fine
+ c := conditions.Get(host, infrav1.RebootSucceededCondition)
+ Expect(c.Message).To(Equal(""))
+ Expect(c.Status).To(Equal(corev1.ConditionTrue))
+ Expect(host.GetAnnotations()).To(BeEmpty())
+ })
+})
diff --git a/pkg/services/baremetal/host/state_machine_test.go b/pkg/services/baremetal/host/state_machine_test.go
index 50453ecbd..08a7d2046 100644
--- a/pkg/services/baremetal/host/state_machine_test.go
+++ b/pkg/services/baremetal/host/state_machine_test.go
@@ -44,7 +44,7 @@ var _ = Describe("updateSSHKey", func() {
"test-host",
"default",
helpers.WithSSHStatus(),
- helpers.WithSSHSpecInclPorts(23, 24),
+ helpers.WithSSHSpecInclPorts(23),
)
dataHashOS, err := infrav1.HashOfSecretData(tc.osSecretData)
diff --git a/pkg/services/hcloud/client/client.go b/pkg/services/hcloud/client/client.go
index 777d2ce2d..16f51b3db 100644
--- a/pkg/services/hcloud/client/client.go
+++ b/pkg/services/hcloud/client/client.go
@@ -75,6 +75,12 @@ type Client interface {
DeletePlacementGroup(context.Context, int64) error
ListPlacementGroups(context.Context, hcloud.PlacementGroupListOpts) ([]*hcloud.PlacementGroup, error)
AddServerToPlacementGroup(context.Context, *hcloud.Server, *hcloud.PlacementGroup) error
+
+ EnableRescueSystem(context.Context, *hcloud.Server, *hcloud.ServerEnableRescueOpts) (hcloud.ServerEnableRescueResult, error)
+
+ Reboot(context.Context, *hcloud.Server) (*hcloud.Action, error)
+
+ GetAction(ctx context.Context, actionID int64) (*hcloud.Action, error)
}
// Factory is the interface for creating new Client objects.
@@ -328,3 +334,27 @@ func (c *realClient) AddServerToPlacementGroup(ctx context.Context, server *hclo
_, _, err := c.client.Server.AddToPlacementGroup(ctx, server, pg)
return err
}
+
+func (c *realClient) EnableRescueSystem(ctx context.Context, server *hcloud.Server, rescueOpts *hcloud.ServerEnableRescueOpts) (result hcloud.ServerEnableRescueResult, reterr error) {
+ result, _, err := c.client.Server.EnableRescue(ctx, server, *rescueOpts)
+ if err != nil {
+ return result, fmt.Errorf("EnableRescue failed for %d: %w", server.ID, err)
+ }
+ return result, nil
+}
+
+func (c *realClient) Reboot(ctx context.Context, server *hcloud.Server) (*hcloud.Action, error) {
+ action, _, err := c.client.Server.Reboot(ctx, server)
+ if err != nil {
+ return action, fmt.Errorf("Reboot failed for %d: %w", server.ID, err)
+ }
+ return action, nil
+}
+
+func (c *realClient) GetAction(ctx context.Context, actionID int64) (*hcloud.Action, error) {
+ action, _, err := c.client.Action.GetByID(ctx, actionID)
+ if err != nil {
+ return action, fmt.Errorf("getting hcloud action failed: %w", err)
+ }
+ return action, nil
+}
diff --git a/pkg/services/hcloud/client/fake/hcloud_client.go b/pkg/services/hcloud/client/fake/hcloud_client.go
index 1aae4077e..a80ad523f 100644
--- a/pkg/services/hcloud/client/fake/hcloud_client.go
+++ b/pkg/services/hcloud/client/fake/hcloud_client.go
@@ -802,6 +802,24 @@ func (c *cacheHCloudClient) AddServerToPlacementGroup(_ context.Context, server
return nil
}
+func (c *cacheHCloudClient) EnableRescueSystem(_ context.Context, _ *hcloud.Server, _ *hcloud.ServerEnableRescueOpts) (result hcloud.ServerEnableRescueResult, reterr error) {
+ return result, nil
+}
+
+func (c *cacheHCloudClient) Reboot(_ context.Context, _ *hcloud.Server) (*hcloud.Action, error) {
+ action := &hcloud.Action{
+ ID: 1,
+ }
+ return action, nil
+}
+
+func (c *cacheHCloudClient) GetAction(_ context.Context, _ int64) (*hcloud.Action, error) {
+ action := &hcloud.Action{
+ ID: 1,
+ }
+ return action, nil
+}
+
func isIntInList(list []int64, str int64) bool {
for _, s := range list {
if s == str {
diff --git a/pkg/services/hcloud/client/mocks/Client.go b/pkg/services/hcloud/client/mocks/Client.go
index 4ed39a3fe..a0b5264d2 100644
--- a/pkg/services/hcloud/client/mocks/Client.go
+++ b/pkg/services/hcloud/client/mocks/Client.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.40.2. DO NOT EDIT.
+// Code generated by mockery v2.53.4. DO NOT EDIT.
package mocks
@@ -17,6 +17,14 @@ type Client struct {
mock.Mock
}
+type Client_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *Client) EXPECT() *Client_Expecter {
+ return &Client_Expecter{mock: &_m.Mock}
+}
+
// AddIPTargetToLoadBalancer provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) AddIPTargetToLoadBalancer(_a0 context.Context, _a1 hcloud.LoadBalancerAddIPTargetOpts, _a2 *hcloud.LoadBalancer) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -35,6 +43,36 @@ func (_m *Client) AddIPTargetToLoadBalancer(_a0 context.Context, _a1 hcloud.Load
return r0
}
+// Client_AddIPTargetToLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddIPTargetToLoadBalancer'
+type Client_AddIPTargetToLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// AddIPTargetToLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.LoadBalancerAddIPTargetOpts
+// - _a2 *hcloud.LoadBalancer
+func (_e *Client_Expecter) AddIPTargetToLoadBalancer(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_AddIPTargetToLoadBalancer_Call {
+ return &Client_AddIPTargetToLoadBalancer_Call{Call: _e.mock.On("AddIPTargetToLoadBalancer", _a0, _a1, _a2)}
+}
+
+func (_c *Client_AddIPTargetToLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 hcloud.LoadBalancerAddIPTargetOpts, _a2 *hcloud.LoadBalancer)) *Client_AddIPTargetToLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.LoadBalancerAddIPTargetOpts), args[2].(*hcloud.LoadBalancer))
+ })
+ return _c
+}
+
+func (_c *Client_AddIPTargetToLoadBalancer_Call) Return(_a0 error) *Client_AddIPTargetToLoadBalancer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_AddIPTargetToLoadBalancer_Call) RunAndReturn(run func(context.Context, hcloud.LoadBalancerAddIPTargetOpts, *hcloud.LoadBalancer) error) *Client_AddIPTargetToLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// AddServerToPlacementGroup provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) AddServerToPlacementGroup(_a0 context.Context, _a1 *hcloud.Server, _a2 *hcloud.PlacementGroup) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -53,6 +91,36 @@ func (_m *Client) AddServerToPlacementGroup(_a0 context.Context, _a1 *hcloud.Ser
return r0
}
+// Client_AddServerToPlacementGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddServerToPlacementGroup'
+type Client_AddServerToPlacementGroup_Call struct {
+ *mock.Call
+}
+
+// AddServerToPlacementGroup is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+// - _a2 *hcloud.PlacementGroup
+func (_e *Client_Expecter) AddServerToPlacementGroup(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_AddServerToPlacementGroup_Call {
+ return &Client_AddServerToPlacementGroup_Call{Call: _e.mock.On("AddServerToPlacementGroup", _a0, _a1, _a2)}
+}
+
+func (_c *Client_AddServerToPlacementGroup_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server, _a2 *hcloud.PlacementGroup)) *Client_AddServerToPlacementGroup_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server), args[2].(*hcloud.PlacementGroup))
+ })
+ return _c
+}
+
+func (_c *Client_AddServerToPlacementGroup_Call) Return(_a0 error) *Client_AddServerToPlacementGroup_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_AddServerToPlacementGroup_Call) RunAndReturn(run func(context.Context, *hcloud.Server, *hcloud.PlacementGroup) error) *Client_AddServerToPlacementGroup_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// AddServiceToLoadBalancer provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) AddServiceToLoadBalancer(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerAddServiceOpts) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -71,6 +139,36 @@ func (_m *Client) AddServiceToLoadBalancer(_a0 context.Context, _a1 *hcloud.Load
return r0
}
+// Client_AddServiceToLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddServiceToLoadBalancer'
+type Client_AddServiceToLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// AddServiceToLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 hcloud.LoadBalancerAddServiceOpts
+func (_e *Client_Expecter) AddServiceToLoadBalancer(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_AddServiceToLoadBalancer_Call {
+ return &Client_AddServiceToLoadBalancer_Call{Call: _e.mock.On("AddServiceToLoadBalancer", _a0, _a1, _a2)}
+}
+
+func (_c *Client_AddServiceToLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerAddServiceOpts)) *Client_AddServiceToLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(hcloud.LoadBalancerAddServiceOpts))
+ })
+ return _c
+}
+
+func (_c *Client_AddServiceToLoadBalancer_Call) Return(_a0 error) *Client_AddServiceToLoadBalancer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_AddServiceToLoadBalancer_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, hcloud.LoadBalancerAddServiceOpts) error) *Client_AddServiceToLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// AddTargetServerToLoadBalancer provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) AddTargetServerToLoadBalancer(_a0 context.Context, _a1 hcloud.LoadBalancerAddServerTargetOpts, _a2 *hcloud.LoadBalancer) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -89,6 +187,36 @@ func (_m *Client) AddTargetServerToLoadBalancer(_a0 context.Context, _a1 hcloud.
return r0
}
+// Client_AddTargetServerToLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddTargetServerToLoadBalancer'
+type Client_AddTargetServerToLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// AddTargetServerToLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.LoadBalancerAddServerTargetOpts
+// - _a2 *hcloud.LoadBalancer
+func (_e *Client_Expecter) AddTargetServerToLoadBalancer(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_AddTargetServerToLoadBalancer_Call {
+ return &Client_AddTargetServerToLoadBalancer_Call{Call: _e.mock.On("AddTargetServerToLoadBalancer", _a0, _a1, _a2)}
+}
+
+func (_c *Client_AddTargetServerToLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 hcloud.LoadBalancerAddServerTargetOpts, _a2 *hcloud.LoadBalancer)) *Client_AddTargetServerToLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.LoadBalancerAddServerTargetOpts), args[2].(*hcloud.LoadBalancer))
+ })
+ return _c
+}
+
+func (_c *Client_AddTargetServerToLoadBalancer_Call) Return(_a0 error) *Client_AddTargetServerToLoadBalancer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_AddTargetServerToLoadBalancer_Call) RunAndReturn(run func(context.Context, hcloud.LoadBalancerAddServerTargetOpts, *hcloud.LoadBalancer) error) *Client_AddTargetServerToLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// AttachLoadBalancerToNetwork provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) AttachLoadBalancerToNetwork(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerAttachToNetworkOpts) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -107,6 +235,36 @@ func (_m *Client) AttachLoadBalancerToNetwork(_a0 context.Context, _a1 *hcloud.L
return r0
}
+// Client_AttachLoadBalancerToNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AttachLoadBalancerToNetwork'
+type Client_AttachLoadBalancerToNetwork_Call struct {
+ *mock.Call
+}
+
+// AttachLoadBalancerToNetwork is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 hcloud.LoadBalancerAttachToNetworkOpts
+func (_e *Client_Expecter) AttachLoadBalancerToNetwork(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_AttachLoadBalancerToNetwork_Call {
+ return &Client_AttachLoadBalancerToNetwork_Call{Call: _e.mock.On("AttachLoadBalancerToNetwork", _a0, _a1, _a2)}
+}
+
+func (_c *Client_AttachLoadBalancerToNetwork_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerAttachToNetworkOpts)) *Client_AttachLoadBalancerToNetwork_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(hcloud.LoadBalancerAttachToNetworkOpts))
+ })
+ return _c
+}
+
+func (_c *Client_AttachLoadBalancerToNetwork_Call) Return(_a0 error) *Client_AttachLoadBalancerToNetwork_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_AttachLoadBalancerToNetwork_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, hcloud.LoadBalancerAttachToNetworkOpts) error) *Client_AttachLoadBalancerToNetwork_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// AttachServerToNetwork provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) AttachServerToNetwork(_a0 context.Context, _a1 *hcloud.Server, _a2 hcloud.ServerAttachToNetworkOpts) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -125,6 +283,36 @@ func (_m *Client) AttachServerToNetwork(_a0 context.Context, _a1 *hcloud.Server,
return r0
}
+// Client_AttachServerToNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AttachServerToNetwork'
+type Client_AttachServerToNetwork_Call struct {
+ *mock.Call
+}
+
+// AttachServerToNetwork is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+// - _a2 hcloud.ServerAttachToNetworkOpts
+func (_e *Client_Expecter) AttachServerToNetwork(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_AttachServerToNetwork_Call {
+ return &Client_AttachServerToNetwork_Call{Call: _e.mock.On("AttachServerToNetwork", _a0, _a1, _a2)}
+}
+
+func (_c *Client_AttachServerToNetwork_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server, _a2 hcloud.ServerAttachToNetworkOpts)) *Client_AttachServerToNetwork_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server), args[2].(hcloud.ServerAttachToNetworkOpts))
+ })
+ return _c
+}
+
+func (_c *Client_AttachServerToNetwork_Call) Return(_a0 error) *Client_AttachServerToNetwork_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_AttachServerToNetwork_Call) RunAndReturn(run func(context.Context, *hcloud.Server, hcloud.ServerAttachToNetworkOpts) error) *Client_AttachServerToNetwork_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ChangeLoadBalancerAlgorithm provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) ChangeLoadBalancerAlgorithm(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerChangeAlgorithmOpts) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -143,6 +331,36 @@ func (_m *Client) ChangeLoadBalancerAlgorithm(_a0 context.Context, _a1 *hcloud.L
return r0
}
+// Client_ChangeLoadBalancerAlgorithm_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChangeLoadBalancerAlgorithm'
+type Client_ChangeLoadBalancerAlgorithm_Call struct {
+ *mock.Call
+}
+
+// ChangeLoadBalancerAlgorithm is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 hcloud.LoadBalancerChangeAlgorithmOpts
+func (_e *Client_Expecter) ChangeLoadBalancerAlgorithm(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_ChangeLoadBalancerAlgorithm_Call {
+ return &Client_ChangeLoadBalancerAlgorithm_Call{Call: _e.mock.On("ChangeLoadBalancerAlgorithm", _a0, _a1, _a2)}
+}
+
+func (_c *Client_ChangeLoadBalancerAlgorithm_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerChangeAlgorithmOpts)) *Client_ChangeLoadBalancerAlgorithm_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(hcloud.LoadBalancerChangeAlgorithmOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ChangeLoadBalancerAlgorithm_Call) Return(_a0 error) *Client_ChangeLoadBalancerAlgorithm_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_ChangeLoadBalancerAlgorithm_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, hcloud.LoadBalancerChangeAlgorithmOpts) error) *Client_ChangeLoadBalancerAlgorithm_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ChangeLoadBalancerType provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) ChangeLoadBalancerType(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerChangeTypeOpts) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -161,6 +379,36 @@ func (_m *Client) ChangeLoadBalancerType(_a0 context.Context, _a1 *hcloud.LoadBa
return r0
}
+// Client_ChangeLoadBalancerType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChangeLoadBalancerType'
+type Client_ChangeLoadBalancerType_Call struct {
+ *mock.Call
+}
+
+// ChangeLoadBalancerType is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 hcloud.LoadBalancerChangeTypeOpts
+func (_e *Client_Expecter) ChangeLoadBalancerType(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_ChangeLoadBalancerType_Call {
+ return &Client_ChangeLoadBalancerType_Call{Call: _e.mock.On("ChangeLoadBalancerType", _a0, _a1, _a2)}
+}
+
+func (_c *Client_ChangeLoadBalancerType_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerChangeTypeOpts)) *Client_ChangeLoadBalancerType_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(hcloud.LoadBalancerChangeTypeOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ChangeLoadBalancerType_Call) Return(_a0 error) *Client_ChangeLoadBalancerType_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_ChangeLoadBalancerType_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, hcloud.LoadBalancerChangeTypeOpts) error) *Client_ChangeLoadBalancerType_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateLoadBalancer provides a mock function with given fields: _a0, _a1
func (_m *Client) CreateLoadBalancer(_a0 context.Context, _a1 hcloud.LoadBalancerCreateOpts) (*hcloud.LoadBalancer, error) {
ret := _m.Called(_a0, _a1)
@@ -191,6 +439,35 @@ func (_m *Client) CreateLoadBalancer(_a0 context.Context, _a1 hcloud.LoadBalance
return r0, r1
}
+// Client_CreateLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateLoadBalancer'
+type Client_CreateLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// CreateLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.LoadBalancerCreateOpts
+func (_e *Client_Expecter) CreateLoadBalancer(_a0 interface{}, _a1 interface{}) *Client_CreateLoadBalancer_Call {
+ return &Client_CreateLoadBalancer_Call{Call: _e.mock.On("CreateLoadBalancer", _a0, _a1)}
+}
+
+func (_c *Client_CreateLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 hcloud.LoadBalancerCreateOpts)) *Client_CreateLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.LoadBalancerCreateOpts))
+ })
+ return _c
+}
+
+func (_c *Client_CreateLoadBalancer_Call) Return(_a0 *hcloud.LoadBalancer, _a1 error) *Client_CreateLoadBalancer_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_CreateLoadBalancer_Call) RunAndReturn(run func(context.Context, hcloud.LoadBalancerCreateOpts) (*hcloud.LoadBalancer, error)) *Client_CreateLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateNetwork provides a mock function with given fields: _a0, _a1
func (_m *Client) CreateNetwork(_a0 context.Context, _a1 hcloud.NetworkCreateOpts) (*hcloud.Network, error) {
ret := _m.Called(_a0, _a1)
@@ -221,6 +498,35 @@ func (_m *Client) CreateNetwork(_a0 context.Context, _a1 hcloud.NetworkCreateOpt
return r0, r1
}
+// Client_CreateNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateNetwork'
+type Client_CreateNetwork_Call struct {
+ *mock.Call
+}
+
+// CreateNetwork is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.NetworkCreateOpts
+func (_e *Client_Expecter) CreateNetwork(_a0 interface{}, _a1 interface{}) *Client_CreateNetwork_Call {
+ return &Client_CreateNetwork_Call{Call: _e.mock.On("CreateNetwork", _a0, _a1)}
+}
+
+func (_c *Client_CreateNetwork_Call) Run(run func(_a0 context.Context, _a1 hcloud.NetworkCreateOpts)) *Client_CreateNetwork_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.NetworkCreateOpts))
+ })
+ return _c
+}
+
+func (_c *Client_CreateNetwork_Call) Return(_a0 *hcloud.Network, _a1 error) *Client_CreateNetwork_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_CreateNetwork_Call) RunAndReturn(run func(context.Context, hcloud.NetworkCreateOpts) (*hcloud.Network, error)) *Client_CreateNetwork_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreatePlacementGroup provides a mock function with given fields: _a0, _a1
func (_m *Client) CreatePlacementGroup(_a0 context.Context, _a1 hcloud.PlacementGroupCreateOpts) (*hcloud.PlacementGroup, error) {
ret := _m.Called(_a0, _a1)
@@ -251,6 +557,35 @@ func (_m *Client) CreatePlacementGroup(_a0 context.Context, _a1 hcloud.Placement
return r0, r1
}
+// Client_CreatePlacementGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreatePlacementGroup'
+type Client_CreatePlacementGroup_Call struct {
+ *mock.Call
+}
+
+// CreatePlacementGroup is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.PlacementGroupCreateOpts
+func (_e *Client_Expecter) CreatePlacementGroup(_a0 interface{}, _a1 interface{}) *Client_CreatePlacementGroup_Call {
+ return &Client_CreatePlacementGroup_Call{Call: _e.mock.On("CreatePlacementGroup", _a0, _a1)}
+}
+
+func (_c *Client_CreatePlacementGroup_Call) Run(run func(_a0 context.Context, _a1 hcloud.PlacementGroupCreateOpts)) *Client_CreatePlacementGroup_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.PlacementGroupCreateOpts))
+ })
+ return _c
+}
+
+func (_c *Client_CreatePlacementGroup_Call) Return(_a0 *hcloud.PlacementGroup, _a1 error) *Client_CreatePlacementGroup_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_CreatePlacementGroup_Call) RunAndReturn(run func(context.Context, hcloud.PlacementGroupCreateOpts) (*hcloud.PlacementGroup, error)) *Client_CreatePlacementGroup_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateServer provides a mock function with given fields: _a0, _a1
func (_m *Client) CreateServer(_a0 context.Context, _a1 hcloud.ServerCreateOpts) (*hcloud.Server, error) {
ret := _m.Called(_a0, _a1)
@@ -281,6 +616,35 @@ func (_m *Client) CreateServer(_a0 context.Context, _a1 hcloud.ServerCreateOpts)
return r0, r1
}
+// Client_CreateServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateServer'
+type Client_CreateServer_Call struct {
+ *mock.Call
+}
+
+// CreateServer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.ServerCreateOpts
+func (_e *Client_Expecter) CreateServer(_a0 interface{}, _a1 interface{}) *Client_CreateServer_Call {
+ return &Client_CreateServer_Call{Call: _e.mock.On("CreateServer", _a0, _a1)}
+}
+
+func (_c *Client_CreateServer_Call) Run(run func(_a0 context.Context, _a1 hcloud.ServerCreateOpts)) *Client_CreateServer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.ServerCreateOpts))
+ })
+ return _c
+}
+
+func (_c *Client_CreateServer_Call) Return(_a0 *hcloud.Server, _a1 error) *Client_CreateServer_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_CreateServer_Call) RunAndReturn(run func(context.Context, hcloud.ServerCreateOpts) (*hcloud.Server, error)) *Client_CreateServer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteIPTargetOfLoadBalancer provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) DeleteIPTargetOfLoadBalancer(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 net.IP) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -299,6 +663,36 @@ func (_m *Client) DeleteIPTargetOfLoadBalancer(_a0 context.Context, _a1 *hcloud.
return r0
}
+// Client_DeleteIPTargetOfLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteIPTargetOfLoadBalancer'
+type Client_DeleteIPTargetOfLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// DeleteIPTargetOfLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 net.IP
+func (_e *Client_Expecter) DeleteIPTargetOfLoadBalancer(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_DeleteIPTargetOfLoadBalancer_Call {
+ return &Client_DeleteIPTargetOfLoadBalancer_Call{Call: _e.mock.On("DeleteIPTargetOfLoadBalancer", _a0, _a1, _a2)}
+}
+
+func (_c *Client_DeleteIPTargetOfLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 net.IP)) *Client_DeleteIPTargetOfLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(net.IP))
+ })
+ return _c
+}
+
+func (_c *Client_DeleteIPTargetOfLoadBalancer_Call) Return(_a0 error) *Client_DeleteIPTargetOfLoadBalancer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_DeleteIPTargetOfLoadBalancer_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, net.IP) error) *Client_DeleteIPTargetOfLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteLoadBalancer provides a mock function with given fields: _a0, _a1
func (_m *Client) DeleteLoadBalancer(_a0 context.Context, _a1 int64) error {
ret := _m.Called(_a0, _a1)
@@ -317,6 +711,35 @@ func (_m *Client) DeleteLoadBalancer(_a0 context.Context, _a1 int64) error {
return r0
}
+// Client_DeleteLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteLoadBalancer'
+type Client_DeleteLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// DeleteLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 int64
+func (_e *Client_Expecter) DeleteLoadBalancer(_a0 interface{}, _a1 interface{}) *Client_DeleteLoadBalancer_Call {
+ return &Client_DeleteLoadBalancer_Call{Call: _e.mock.On("DeleteLoadBalancer", _a0, _a1)}
+}
+
+func (_c *Client_DeleteLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 int64)) *Client_DeleteLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Client_DeleteLoadBalancer_Call) Return(_a0 error) *Client_DeleteLoadBalancer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_DeleteLoadBalancer_Call) RunAndReturn(run func(context.Context, int64) error) *Client_DeleteLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteNetwork provides a mock function with given fields: _a0, _a1
func (_m *Client) DeleteNetwork(_a0 context.Context, _a1 *hcloud.Network) error {
ret := _m.Called(_a0, _a1)
@@ -335,6 +758,35 @@ func (_m *Client) DeleteNetwork(_a0 context.Context, _a1 *hcloud.Network) error
return r0
}
+// Client_DeleteNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteNetwork'
+type Client_DeleteNetwork_Call struct {
+ *mock.Call
+}
+
+// DeleteNetwork is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Network
+func (_e *Client_Expecter) DeleteNetwork(_a0 interface{}, _a1 interface{}) *Client_DeleteNetwork_Call {
+ return &Client_DeleteNetwork_Call{Call: _e.mock.On("DeleteNetwork", _a0, _a1)}
+}
+
+func (_c *Client_DeleteNetwork_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Network)) *Client_DeleteNetwork_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Network))
+ })
+ return _c
+}
+
+func (_c *Client_DeleteNetwork_Call) Return(_a0 error) *Client_DeleteNetwork_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_DeleteNetwork_Call) RunAndReturn(run func(context.Context, *hcloud.Network) error) *Client_DeleteNetwork_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeletePlacementGroup provides a mock function with given fields: _a0, _a1
func (_m *Client) DeletePlacementGroup(_a0 context.Context, _a1 int64) error {
ret := _m.Called(_a0, _a1)
@@ -353,6 +805,35 @@ func (_m *Client) DeletePlacementGroup(_a0 context.Context, _a1 int64) error {
return r0
}
+// Client_DeletePlacementGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeletePlacementGroup'
+type Client_DeletePlacementGroup_Call struct {
+ *mock.Call
+}
+
+// DeletePlacementGroup is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 int64
+func (_e *Client_Expecter) DeletePlacementGroup(_a0 interface{}, _a1 interface{}) *Client_DeletePlacementGroup_Call {
+ return &Client_DeletePlacementGroup_Call{Call: _e.mock.On("DeletePlacementGroup", _a0, _a1)}
+}
+
+func (_c *Client_DeletePlacementGroup_Call) Run(run func(_a0 context.Context, _a1 int64)) *Client_DeletePlacementGroup_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Client_DeletePlacementGroup_Call) Return(_a0 error) *Client_DeletePlacementGroup_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_DeletePlacementGroup_Call) RunAndReturn(run func(context.Context, int64) error) *Client_DeletePlacementGroup_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteServer provides a mock function with given fields: _a0, _a1
func (_m *Client) DeleteServer(_a0 context.Context, _a1 *hcloud.Server) error {
ret := _m.Called(_a0, _a1)
@@ -371,6 +852,35 @@ func (_m *Client) DeleteServer(_a0 context.Context, _a1 *hcloud.Server) error {
return r0
}
+// Client_DeleteServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteServer'
+type Client_DeleteServer_Call struct {
+ *mock.Call
+}
+
+// DeleteServer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+func (_e *Client_Expecter) DeleteServer(_a0 interface{}, _a1 interface{}) *Client_DeleteServer_Call {
+ return &Client_DeleteServer_Call{Call: _e.mock.On("DeleteServer", _a0, _a1)}
+}
+
+func (_c *Client_DeleteServer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server)) *Client_DeleteServer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server))
+ })
+ return _c
+}
+
+func (_c *Client_DeleteServer_Call) Return(_a0 error) *Client_DeleteServer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_DeleteServer_Call) RunAndReturn(run func(context.Context, *hcloud.Server) error) *Client_DeleteServer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteServiceFromLoadBalancer provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) DeleteServiceFromLoadBalancer(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 int) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -389,6 +899,36 @@ func (_m *Client) DeleteServiceFromLoadBalancer(_a0 context.Context, _a1 *hcloud
return r0
}
+// Client_DeleteServiceFromLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteServiceFromLoadBalancer'
+type Client_DeleteServiceFromLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// DeleteServiceFromLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 int
+func (_e *Client_Expecter) DeleteServiceFromLoadBalancer(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_DeleteServiceFromLoadBalancer_Call {
+ return &Client_DeleteServiceFromLoadBalancer_Call{Call: _e.mock.On("DeleteServiceFromLoadBalancer", _a0, _a1, _a2)}
+}
+
+func (_c *Client_DeleteServiceFromLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 int)) *Client_DeleteServiceFromLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(int))
+ })
+ return _c
+}
+
+func (_c *Client_DeleteServiceFromLoadBalancer_Call) Return(_a0 error) *Client_DeleteServiceFromLoadBalancer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_DeleteServiceFromLoadBalancer_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, int) error) *Client_DeleteServiceFromLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteTargetServerOfLoadBalancer provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) DeleteTargetServerOfLoadBalancer(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 *hcloud.Server) error {
ret := _m.Called(_a0, _a1, _a2)
@@ -407,6 +947,153 @@ func (_m *Client) DeleteTargetServerOfLoadBalancer(_a0 context.Context, _a1 *hcl
return r0
}
+// Client_DeleteTargetServerOfLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteTargetServerOfLoadBalancer'
+type Client_DeleteTargetServerOfLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// DeleteTargetServerOfLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 *hcloud.Server
+func (_e *Client_Expecter) DeleteTargetServerOfLoadBalancer(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_DeleteTargetServerOfLoadBalancer_Call {
+ return &Client_DeleteTargetServerOfLoadBalancer_Call{Call: _e.mock.On("DeleteTargetServerOfLoadBalancer", _a0, _a1, _a2)}
+}
+
+func (_c *Client_DeleteTargetServerOfLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 *hcloud.Server)) *Client_DeleteTargetServerOfLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(*hcloud.Server))
+ })
+ return _c
+}
+
+func (_c *Client_DeleteTargetServerOfLoadBalancer_Call) Return(_a0 error) *Client_DeleteTargetServerOfLoadBalancer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_DeleteTargetServerOfLoadBalancer_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, *hcloud.Server) error) *Client_DeleteTargetServerOfLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// EnableRescueSystem provides a mock function with given fields: _a0, _a1, _a2
+func (_m *Client) EnableRescueSystem(_a0 context.Context, _a1 *hcloud.Server, _a2 *hcloud.ServerEnableRescueOpts) (hcloud.ServerEnableRescueResult, error) {
+ ret := _m.Called(_a0, _a1, _a2)
+
+ if len(ret) == 0 {
+ panic("no return value specified for EnableRescueSystem")
+ }
+
+ var r0 hcloud.ServerEnableRescueResult
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *hcloud.Server, *hcloud.ServerEnableRescueOpts) (hcloud.ServerEnableRescueResult, error)); ok {
+ return rf(_a0, _a1, _a2)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *hcloud.Server, *hcloud.ServerEnableRescueOpts) hcloud.ServerEnableRescueResult); ok {
+ r0 = rf(_a0, _a1, _a2)
+ } else {
+ r0 = ret.Get(0).(hcloud.ServerEnableRescueResult)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *hcloud.Server, *hcloud.ServerEnableRescueOpts) error); ok {
+ r1 = rf(_a0, _a1, _a2)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Client_EnableRescueSystem_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableRescueSystem'
+type Client_EnableRescueSystem_Call struct {
+ *mock.Call
+}
+
+// EnableRescueSystem is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+// - _a2 *hcloud.ServerEnableRescueOpts
+func (_e *Client_Expecter) EnableRescueSystem(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_EnableRescueSystem_Call {
+ return &Client_EnableRescueSystem_Call{Call: _e.mock.On("EnableRescueSystem", _a0, _a1, _a2)}
+}
+
+func (_c *Client_EnableRescueSystem_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server, _a2 *hcloud.ServerEnableRescueOpts)) *Client_EnableRescueSystem_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server), args[2].(*hcloud.ServerEnableRescueOpts))
+ })
+ return _c
+}
+
+func (_c *Client_EnableRescueSystem_Call) Return(_a0 hcloud.ServerEnableRescueResult, _a1 error) *Client_EnableRescueSystem_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_EnableRescueSystem_Call) RunAndReturn(run func(context.Context, *hcloud.Server, *hcloud.ServerEnableRescueOpts) (hcloud.ServerEnableRescueResult, error)) *Client_EnableRescueSystem_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetAction provides a mock function with given fields: ctx, actionID
+func (_m *Client) GetAction(ctx context.Context, actionID int64) (*hcloud.Action, error) {
+ ret := _m.Called(ctx, actionID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetAction")
+ }
+
+ var r0 *hcloud.Action
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*hcloud.Action, error)); ok {
+ return rf(ctx, actionID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *hcloud.Action); ok {
+ r0 = rf(ctx, actionID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*hcloud.Action)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, actionID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Client_GetAction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAction'
+type Client_GetAction_Call struct {
+ *mock.Call
+}
+
+// GetAction is a helper method to define mock.On call
+// - ctx context.Context
+// - actionID int64
+func (_e *Client_Expecter) GetAction(ctx interface{}, actionID interface{}) *Client_GetAction_Call {
+ return &Client_GetAction_Call{Call: _e.mock.On("GetAction", ctx, actionID)}
+}
+
+func (_c *Client_GetAction_Call) Run(run func(ctx context.Context, actionID int64)) *Client_GetAction_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Client_GetAction_Call) Return(_a0 *hcloud.Action, _a1 error) *Client_GetAction_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_GetAction_Call) RunAndReturn(run func(context.Context, int64) (*hcloud.Action, error)) *Client_GetAction_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetServer provides a mock function with given fields: _a0, _a1
func (_m *Client) GetServer(_a0 context.Context, _a1 int64) (*hcloud.Server, error) {
ret := _m.Called(_a0, _a1)
@@ -437,6 +1124,35 @@ func (_m *Client) GetServer(_a0 context.Context, _a1 int64) (*hcloud.Server, err
return r0, r1
}
+// Client_GetServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetServer'
+type Client_GetServer_Call struct {
+ *mock.Call
+}
+
+// GetServer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 int64
+func (_e *Client_Expecter) GetServer(_a0 interface{}, _a1 interface{}) *Client_GetServer_Call {
+ return &Client_GetServer_Call{Call: _e.mock.On("GetServer", _a0, _a1)}
+}
+
+func (_c *Client_GetServer_Call) Run(run func(_a0 context.Context, _a1 int64)) *Client_GetServer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Client_GetServer_Call) Return(_a0 *hcloud.Server, _a1 error) *Client_GetServer_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_GetServer_Call) RunAndReturn(run func(context.Context, int64) (*hcloud.Server, error)) *Client_GetServer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetServerType provides a mock function with given fields: _a0, _a1
func (_m *Client) GetServerType(_a0 context.Context, _a1 string) (*hcloud.ServerType, error) {
ret := _m.Called(_a0, _a1)
@@ -467,6 +1183,35 @@ func (_m *Client) GetServerType(_a0 context.Context, _a1 string) (*hcloud.Server
return r0, r1
}
+// Client_GetServerType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetServerType'
+type Client_GetServerType_Call struct {
+ *mock.Call
+}
+
+// GetServerType is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 string
+func (_e *Client_Expecter) GetServerType(_a0 interface{}, _a1 interface{}) *Client_GetServerType_Call {
+ return &Client_GetServerType_Call{Call: _e.mock.On("GetServerType", _a0, _a1)}
+}
+
+func (_c *Client_GetServerType_Call) Run(run func(_a0 context.Context, _a1 string)) *Client_GetServerType_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Client_GetServerType_Call) Return(_a0 *hcloud.ServerType, _a1 error) *Client_GetServerType_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_GetServerType_Call) RunAndReturn(run func(context.Context, string) (*hcloud.ServerType, error)) *Client_GetServerType_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListImages provides a mock function with given fields: _a0, _a1
func (_m *Client) ListImages(_a0 context.Context, _a1 hcloud.ImageListOpts) ([]*hcloud.Image, error) {
ret := _m.Called(_a0, _a1)
@@ -497,6 +1242,35 @@ func (_m *Client) ListImages(_a0 context.Context, _a1 hcloud.ImageListOpts) ([]*
return r0, r1
}
+// Client_ListImages_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListImages'
+type Client_ListImages_Call struct {
+ *mock.Call
+}
+
+// ListImages is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.ImageListOpts
+func (_e *Client_Expecter) ListImages(_a0 interface{}, _a1 interface{}) *Client_ListImages_Call {
+ return &Client_ListImages_Call{Call: _e.mock.On("ListImages", _a0, _a1)}
+}
+
+func (_c *Client_ListImages_Call) Run(run func(_a0 context.Context, _a1 hcloud.ImageListOpts)) *Client_ListImages_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.ImageListOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ListImages_Call) Return(_a0 []*hcloud.Image, _a1 error) *Client_ListImages_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_ListImages_Call) RunAndReturn(run func(context.Context, hcloud.ImageListOpts) ([]*hcloud.Image, error)) *Client_ListImages_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListLoadBalancers provides a mock function with given fields: _a0, _a1
func (_m *Client) ListLoadBalancers(_a0 context.Context, _a1 hcloud.LoadBalancerListOpts) ([]*hcloud.LoadBalancer, error) {
ret := _m.Called(_a0, _a1)
@@ -527,6 +1301,35 @@ func (_m *Client) ListLoadBalancers(_a0 context.Context, _a1 hcloud.LoadBalancer
return r0, r1
}
+// Client_ListLoadBalancers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListLoadBalancers'
+type Client_ListLoadBalancers_Call struct {
+ *mock.Call
+}
+
+// ListLoadBalancers is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.LoadBalancerListOpts
+func (_e *Client_Expecter) ListLoadBalancers(_a0 interface{}, _a1 interface{}) *Client_ListLoadBalancers_Call {
+ return &Client_ListLoadBalancers_Call{Call: _e.mock.On("ListLoadBalancers", _a0, _a1)}
+}
+
+func (_c *Client_ListLoadBalancers_Call) Run(run func(_a0 context.Context, _a1 hcloud.LoadBalancerListOpts)) *Client_ListLoadBalancers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.LoadBalancerListOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ListLoadBalancers_Call) Return(_a0 []*hcloud.LoadBalancer, _a1 error) *Client_ListLoadBalancers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_ListLoadBalancers_Call) RunAndReturn(run func(context.Context, hcloud.LoadBalancerListOpts) ([]*hcloud.LoadBalancer, error)) *Client_ListLoadBalancers_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListNetworks provides a mock function with given fields: _a0, _a1
func (_m *Client) ListNetworks(_a0 context.Context, _a1 hcloud.NetworkListOpts) ([]*hcloud.Network, error) {
ret := _m.Called(_a0, _a1)
@@ -557,6 +1360,35 @@ func (_m *Client) ListNetworks(_a0 context.Context, _a1 hcloud.NetworkListOpts)
return r0, r1
}
+// Client_ListNetworks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListNetworks'
+type Client_ListNetworks_Call struct {
+ *mock.Call
+}
+
+// ListNetworks is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.NetworkListOpts
+func (_e *Client_Expecter) ListNetworks(_a0 interface{}, _a1 interface{}) *Client_ListNetworks_Call {
+ return &Client_ListNetworks_Call{Call: _e.mock.On("ListNetworks", _a0, _a1)}
+}
+
+func (_c *Client_ListNetworks_Call) Run(run func(_a0 context.Context, _a1 hcloud.NetworkListOpts)) *Client_ListNetworks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.NetworkListOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ListNetworks_Call) Return(_a0 []*hcloud.Network, _a1 error) *Client_ListNetworks_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_ListNetworks_Call) RunAndReturn(run func(context.Context, hcloud.NetworkListOpts) ([]*hcloud.Network, error)) *Client_ListNetworks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListPlacementGroups provides a mock function with given fields: _a0, _a1
func (_m *Client) ListPlacementGroups(_a0 context.Context, _a1 hcloud.PlacementGroupListOpts) ([]*hcloud.PlacementGroup, error) {
ret := _m.Called(_a0, _a1)
@@ -587,6 +1419,35 @@ func (_m *Client) ListPlacementGroups(_a0 context.Context, _a1 hcloud.PlacementG
return r0, r1
}
+// Client_ListPlacementGroups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListPlacementGroups'
+type Client_ListPlacementGroups_Call struct {
+ *mock.Call
+}
+
+// ListPlacementGroups is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.PlacementGroupListOpts
+func (_e *Client_Expecter) ListPlacementGroups(_a0 interface{}, _a1 interface{}) *Client_ListPlacementGroups_Call {
+ return &Client_ListPlacementGroups_Call{Call: _e.mock.On("ListPlacementGroups", _a0, _a1)}
+}
+
+func (_c *Client_ListPlacementGroups_Call) Run(run func(_a0 context.Context, _a1 hcloud.PlacementGroupListOpts)) *Client_ListPlacementGroups_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.PlacementGroupListOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ListPlacementGroups_Call) Return(_a0 []*hcloud.PlacementGroup, _a1 error) *Client_ListPlacementGroups_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_ListPlacementGroups_Call) RunAndReturn(run func(context.Context, hcloud.PlacementGroupListOpts) ([]*hcloud.PlacementGroup, error)) *Client_ListPlacementGroups_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListSSHKeys provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSSHKeys(_a0 context.Context, _a1 hcloud.SSHKeyListOpts) ([]*hcloud.SSHKey, error) {
ret := _m.Called(_a0, _a1)
@@ -617,6 +1478,35 @@ func (_m *Client) ListSSHKeys(_a0 context.Context, _a1 hcloud.SSHKeyListOpts) ([
return r0, r1
}
+// Client_ListSSHKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSSHKeys'
+type Client_ListSSHKeys_Call struct {
+ *mock.Call
+}
+
+// ListSSHKeys is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.SSHKeyListOpts
+func (_e *Client_Expecter) ListSSHKeys(_a0 interface{}, _a1 interface{}) *Client_ListSSHKeys_Call {
+ return &Client_ListSSHKeys_Call{Call: _e.mock.On("ListSSHKeys", _a0, _a1)}
+}
+
+func (_c *Client_ListSSHKeys_Call) Run(run func(_a0 context.Context, _a1 hcloud.SSHKeyListOpts)) *Client_ListSSHKeys_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.SSHKeyListOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ListSSHKeys_Call) Return(_a0 []*hcloud.SSHKey, _a1 error) *Client_ListSSHKeys_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_ListSSHKeys_Call) RunAndReturn(run func(context.Context, hcloud.SSHKeyListOpts) ([]*hcloud.SSHKey, error)) *Client_ListSSHKeys_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListServerTypes provides a mock function with given fields: _a0
func (_m *Client) ListServerTypes(_a0 context.Context) ([]*hcloud.ServerType, error) {
ret := _m.Called(_a0)
@@ -647,6 +1537,34 @@ func (_m *Client) ListServerTypes(_a0 context.Context) ([]*hcloud.ServerType, er
return r0, r1
}
+// Client_ListServerTypes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListServerTypes'
+type Client_ListServerTypes_Call struct {
+ *mock.Call
+}
+
+// ListServerTypes is a helper method to define mock.On call
+// - _a0 context.Context
+func (_e *Client_Expecter) ListServerTypes(_a0 interface{}) *Client_ListServerTypes_Call {
+ return &Client_ListServerTypes_Call{Call: _e.mock.On("ListServerTypes", _a0)}
+}
+
+func (_c *Client_ListServerTypes_Call) Run(run func(_a0 context.Context)) *Client_ListServerTypes_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Client_ListServerTypes_Call) Return(_a0 []*hcloud.ServerType, _a1 error) *Client_ListServerTypes_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_ListServerTypes_Call) RunAndReturn(run func(context.Context) ([]*hcloud.ServerType, error)) *Client_ListServerTypes_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListServers provides a mock function with given fields: _a0, _a1
func (_m *Client) ListServers(_a0 context.Context, _a1 hcloud.ServerListOpts) ([]*hcloud.Server, error) {
ret := _m.Called(_a0, _a1)
@@ -677,6 +1595,35 @@ func (_m *Client) ListServers(_a0 context.Context, _a1 hcloud.ServerListOpts) ([
return r0, r1
}
+// Client_ListServers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListServers'
+type Client_ListServers_Call struct {
+ *mock.Call
+}
+
+// ListServers is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 hcloud.ServerListOpts
+func (_e *Client_Expecter) ListServers(_a0 interface{}, _a1 interface{}) *Client_ListServers_Call {
+ return &Client_ListServers_Call{Call: _e.mock.On("ListServers", _a0, _a1)}
+}
+
+func (_c *Client_ListServers_Call) Run(run func(_a0 context.Context, _a1 hcloud.ServerListOpts)) *Client_ListServers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(hcloud.ServerListOpts))
+ })
+ return _c
+}
+
+func (_c *Client_ListServers_Call) Return(_a0 []*hcloud.Server, _a1 error) *Client_ListServers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_ListServers_Call) RunAndReturn(run func(context.Context, hcloud.ServerListOpts) ([]*hcloud.Server, error)) *Client_ListServers_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// PowerOnServer provides a mock function with given fields: _a0, _a1
func (_m *Client) PowerOnServer(_a0 context.Context, _a1 *hcloud.Server) error {
ret := _m.Called(_a0, _a1)
@@ -695,6 +1642,94 @@ func (_m *Client) PowerOnServer(_a0 context.Context, _a1 *hcloud.Server) error {
return r0
}
+// Client_PowerOnServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PowerOnServer'
+type Client_PowerOnServer_Call struct {
+ *mock.Call
+}
+
+// PowerOnServer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+func (_e *Client_Expecter) PowerOnServer(_a0 interface{}, _a1 interface{}) *Client_PowerOnServer_Call {
+ return &Client_PowerOnServer_Call{Call: _e.mock.On("PowerOnServer", _a0, _a1)}
+}
+
+func (_c *Client_PowerOnServer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server)) *Client_PowerOnServer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server))
+ })
+ return _c
+}
+
+func (_c *Client_PowerOnServer_Call) Return(_a0 error) *Client_PowerOnServer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_PowerOnServer_Call) RunAndReturn(run func(context.Context, *hcloud.Server) error) *Client_PowerOnServer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Reboot provides a mock function with given fields: _a0, _a1
+func (_m *Client) Reboot(_a0 context.Context, _a1 *hcloud.Server) (*hcloud.Action, error) {
+ ret := _m.Called(_a0, _a1)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Reboot")
+ }
+
+ var r0 *hcloud.Action
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *hcloud.Server) (*hcloud.Action, error)); ok {
+ return rf(_a0, _a1)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *hcloud.Server) *hcloud.Action); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*hcloud.Action)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *hcloud.Server) error); ok {
+ r1 = rf(_a0, _a1)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Client_Reboot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reboot'
+type Client_Reboot_Call struct {
+ *mock.Call
+}
+
+// Reboot is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+func (_e *Client_Expecter) Reboot(_a0 interface{}, _a1 interface{}) *Client_Reboot_Call {
+ return &Client_Reboot_Call{Call: _e.mock.On("Reboot", _a0, _a1)}
+}
+
+func (_c *Client_Reboot_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server)) *Client_Reboot_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server))
+ })
+ return _c
+}
+
+func (_c *Client_Reboot_Call) Return(_a0 *hcloud.Action, _a1 error) *Client_Reboot_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_Reboot_Call) RunAndReturn(run func(context.Context, *hcloud.Server) (*hcloud.Action, error)) *Client_Reboot_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// RebootServer provides a mock function with given fields: _a0, _a1
func (_m *Client) RebootServer(_a0 context.Context, _a1 *hcloud.Server) error {
ret := _m.Called(_a0, _a1)
@@ -713,11 +1748,67 @@ func (_m *Client) RebootServer(_a0 context.Context, _a1 *hcloud.Server) error {
return r0
}
-// Reset provides a mock function with given fields:
+// Client_RebootServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RebootServer'
+type Client_RebootServer_Call struct {
+ *mock.Call
+}
+
+// RebootServer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+func (_e *Client_Expecter) RebootServer(_a0 interface{}, _a1 interface{}) *Client_RebootServer_Call {
+ return &Client_RebootServer_Call{Call: _e.mock.On("RebootServer", _a0, _a1)}
+}
+
+func (_c *Client_RebootServer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server)) *Client_RebootServer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server))
+ })
+ return _c
+}
+
+func (_c *Client_RebootServer_Call) Return(_a0 error) *Client_RebootServer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_RebootServer_Call) RunAndReturn(run func(context.Context, *hcloud.Server) error) *Client_RebootServer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Reset provides a mock function with no fields
func (_m *Client) Reset() {
_m.Called()
}
+// Client_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset'
+type Client_Reset_Call struct {
+ *mock.Call
+}
+
+// Reset is a helper method to define mock.On call
+func (_e *Client_Expecter) Reset() *Client_Reset_Call {
+ return &Client_Reset_Call{Call: _e.mock.On("Reset")}
+}
+
+func (_c *Client_Reset_Call) Run(run func()) *Client_Reset_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Client_Reset_Call) Return() *Client_Reset_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *Client_Reset_Call) RunAndReturn(run func()) *Client_Reset_Call {
+ _c.Run(run)
+ return _c
+}
+
// ShutdownServer provides a mock function with given fields: _a0, _a1
func (_m *Client) ShutdownServer(_a0 context.Context, _a1 *hcloud.Server) error {
ret := _m.Called(_a0, _a1)
@@ -736,6 +1827,35 @@ func (_m *Client) ShutdownServer(_a0 context.Context, _a1 *hcloud.Server) error
return r0
}
+// Client_ShutdownServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShutdownServer'
+type Client_ShutdownServer_Call struct {
+ *mock.Call
+}
+
+// ShutdownServer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.Server
+func (_e *Client_Expecter) ShutdownServer(_a0 interface{}, _a1 interface{}) *Client_ShutdownServer_Call {
+ return &Client_ShutdownServer_Call{Call: _e.mock.On("ShutdownServer", _a0, _a1)}
+}
+
+func (_c *Client_ShutdownServer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.Server)) *Client_ShutdownServer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.Server))
+ })
+ return _c
+}
+
+func (_c *Client_ShutdownServer_Call) Return(_a0 error) *Client_ShutdownServer_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Client_ShutdownServer_Call) RunAndReturn(run func(context.Context, *hcloud.Server) error) *Client_ShutdownServer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UpdateLoadBalancer provides a mock function with given fields: _a0, _a1, _a2
func (_m *Client) UpdateLoadBalancer(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerUpdateOpts) (*hcloud.LoadBalancer, error) {
ret := _m.Called(_a0, _a1, _a2)
@@ -766,6 +1886,36 @@ func (_m *Client) UpdateLoadBalancer(_a0 context.Context, _a1 *hcloud.LoadBalanc
return r0, r1
}
+// Client_UpdateLoadBalancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateLoadBalancer'
+type Client_UpdateLoadBalancer_Call struct {
+ *mock.Call
+}
+
+// UpdateLoadBalancer is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *hcloud.LoadBalancer
+// - _a2 hcloud.LoadBalancerUpdateOpts
+func (_e *Client_Expecter) UpdateLoadBalancer(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_UpdateLoadBalancer_Call {
+ return &Client_UpdateLoadBalancer_Call{Call: _e.mock.On("UpdateLoadBalancer", _a0, _a1, _a2)}
+}
+
+func (_c *Client_UpdateLoadBalancer_Call) Run(run func(_a0 context.Context, _a1 *hcloud.LoadBalancer, _a2 hcloud.LoadBalancerUpdateOpts)) *Client_UpdateLoadBalancer_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*hcloud.LoadBalancer), args[2].(hcloud.LoadBalancerUpdateOpts))
+ })
+ return _c
+}
+
+func (_c *Client_UpdateLoadBalancer_Call) Return(_a0 *hcloud.LoadBalancer, _a1 error) *Client_UpdateLoadBalancer_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Client_UpdateLoadBalancer_Call) RunAndReturn(run func(context.Context, *hcloud.LoadBalancer, hcloud.LoadBalancerUpdateOpts) (*hcloud.LoadBalancer, error)) *Client_UpdateLoadBalancer_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewClient(t interface {
diff --git a/pkg/services/hcloud/client/mocks/Factory.go b/pkg/services/hcloud/client/mocks/Factory.go
index a3f29b977..d5c2ea527 100644
--- a/pkg/services/hcloud/client/mocks/Factory.go
+++ b/pkg/services/hcloud/client/mocks/Factory.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.40.2. DO NOT EDIT.
+// Code generated by mockery v2.53.4. DO NOT EDIT.
package mocks
@@ -12,6 +12,14 @@ type Factory struct {
mock.Mock
}
+type Factory_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *Factory) EXPECT() *Factory_Expecter {
+ return &Factory_Expecter{mock: &_m.Mock}
+}
+
// NewClient provides a mock function with given fields: hcloudToken
func (_m *Factory) NewClient(hcloudToken string) hcloudclient.Client {
ret := _m.Called(hcloudToken)
@@ -32,6 +40,34 @@ func (_m *Factory) NewClient(hcloudToken string) hcloudclient.Client {
return r0
}
+// Factory_NewClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewClient'
+type Factory_NewClient_Call struct {
+ *mock.Call
+}
+
+// NewClient is a helper method to define mock.On call
+// - hcloudToken string
+func (_e *Factory_Expecter) NewClient(hcloudToken interface{}) *Factory_NewClient_Call {
+ return &Factory_NewClient_Call{Call: _e.mock.On("NewClient", hcloudToken)}
+}
+
+func (_c *Factory_NewClient_Call) Run(run func(hcloudToken string)) *Factory_NewClient_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(string))
+ })
+ return _c
+}
+
+func (_c *Factory_NewClient_Call) Return(_a0 hcloudclient.Client) *Factory_NewClient_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Factory_NewClient_Call) RunAndReturn(run func(string) hcloudclient.Client) *Factory_NewClient_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// NewFactory creates a new instance of Factory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFactory(t interface {
diff --git a/pkg/services/hcloud/loadbalancer/loadbalancer.go b/pkg/services/hcloud/loadbalancer/loadbalancer.go
index 45512ccf2..fcc10fff3 100644
--- a/pkg/services/hcloud/loadbalancer/loadbalancer.go
+++ b/pkg/services/hcloud/loadbalancer/loadbalancer.go
@@ -50,6 +50,9 @@ func NewService(scope *scope.ClusterScope) *Service {
// ErrNoLoadBalancerAvailable indicates that no available load balancer could be fond.
var ErrNoLoadBalancerAvailable = fmt.Errorf("no available load balancer")
+// ErrControlPlaneEndpointNotSet indicates that hetznercluster.spec.controlPlaneEndpoint is not set.
+var ErrControlPlaneEndpointNotSet = errors.New("hetznercluster.spec.controlPlaneEndpoint is not set")
+
// Reconcile implements the life cycle of HCloud load balancers.
func (s *Service) Reconcile(ctx context.Context) (reconcile.Result, error) {
// delete the deprecated condition from existing cluster objects
@@ -68,20 +71,37 @@ func (s *Service) Reconcile(ctx context.Context) (reconcile.Result, error) {
}
if lb == nil {
- // fixed name is set - we expect a load balancer with this name to exist
-
if s.scope.HetznerCluster.Spec.ControlPlaneLoadBalancer.Name != nil {
+ // fixed name is set - we expect a load balancer with this name to exist
lb, err = s.ownExistingLoadBalancer(ctx)
-
- // if load balancer is not found even though we expect it to exist, wait and reconcile until user creates it
- if errors.Is(err, ErrNoLoadBalancerAvailable) {
- return reconcile.Result{RequeueAfter: 1 * time.Minute}, nil
+ if err != nil {
+ // if load balancer is not found even though we expect it to exist, wait and reconcile until user creates it
+ if errors.Is(err, ErrNoLoadBalancerAvailable) {
+ return reconcile.Result{RequeueAfter: 1 * time.Minute}, nil
+ }
+ return reconcile.Result{}, fmt.Errorf("failed to own existing load balancer (name=%s): %w", *s.scope.HetznerCluster.Spec.ControlPlaneLoadBalancer.Name, err)
}
} else {
lb, err = s.createLoadBalancer(ctx)
- }
- if err != nil {
- return reconcile.Result{}, fmt.Errorf("failed to own/create load balancer: %w", err)
+ if err != nil {
+ if errors.Is(err, ErrControlPlaneEndpointNotSet) {
+ // When an external ControlPlane Provider gets used (Kamaji), it might
+ // need some time until the endpoint is available.
+ err = fmt.Errorf("requeue, waiting for control-plane endpoint to be set: %w",
+ err)
+ conditions.MarkFalse(
+ s.scope.HetznerCluster,
+ infrav1.LoadBalancerReadyCondition,
+ "MissingControlPlaneEndpoint",
+ clusterv1.ConditionSeverityWarning,
+ "%s",
+ err.Error(),
+ )
+ s.scope.Logger.Info(err.Error())
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+ return reconcile.Result{}, fmt.Errorf("failed to create load balancer: %w", err)
+ }
}
}
@@ -287,7 +307,10 @@ func (s *Service) reconcileServices(ctx context.Context, lb *hcloud.LoadBalancer
}
func (s *Service) createLoadBalancer(ctx context.Context) (*hcloud.LoadBalancer, error) {
- opts := createOptsFromSpec(s.scope.HetznerCluster)
+ opts, err := createOptsFromSpec(s.scope.HetznerCluster)
+ if err != nil {
+ return nil, err
+ }
lb, err := s.scope.HCloudClient.CreateLoadBalancer(ctx, opts)
if err != nil {
err = fmt.Errorf("failed to create load balancer: %w", err)
@@ -309,7 +332,7 @@ func (s *Service) createLoadBalancer(ctx context.Context) (*hcloud.LoadBalancer,
return lb, nil
}
-func createOptsFromSpec(hc *infrav1.HetznerCluster) hcloud.LoadBalancerCreateOpts {
+func createOptsFromSpec(hc *infrav1.HetznerCluster) (hcloud.LoadBalancerCreateOpts, error) {
// gather algorithm type
algorithmType := hc.Spec.ControlPlaneLoadBalancer.Algorithm.HCloudAlgorithmType()
@@ -323,6 +346,10 @@ func createOptsFromSpec(hc *infrav1.HetznerCluster) hcloud.LoadBalancerCreateOpt
network = &hcloud.Network{ID: hc.Status.Network.ID}
}
+ if hc.Spec.ControlPlaneEndpoint == nil {
+ return hcloud.LoadBalancerCreateOpts{}, ErrControlPlaneEndpointNotSet
+ }
+
listenPort := int(hc.Spec.ControlPlaneEndpoint.Port)
publicInterface := true
return hcloud.LoadBalancerCreateOpts{
@@ -341,7 +368,7 @@ func createOptsFromSpec(hc *infrav1.HetznerCluster) hcloud.LoadBalancerCreateOpt
Proxyprotocol: &proxyprotocol,
},
},
- }
+ }, nil
}
// Delete implements the deletion of HCloud load balancers.
diff --git a/pkg/services/hcloud/loadbalancer/loadbalancer_test.go b/pkg/services/hcloud/loadbalancer/loadbalancer_test.go
index f939b1c2c..2fda88690 100644
--- a/pkg/services/hcloud/loadbalancer/loadbalancer_test.go
+++ b/pkg/services/hcloud/loadbalancer/loadbalancer_test.go
@@ -17,6 +17,8 @@ limitations under the License.
package loadbalancer
import (
+ "errors"
+
"github.com/go-logr/logr"
"github.com/hetznercloud/hcloud-go/v2/hcloud"
. "github.com/onsi/ginkgo/v2"
@@ -122,7 +124,8 @@ var _ = Describe("createOptsFromSpec", func() {
hetznerCluster.Status.Network = nil
wantCreateOpts.Network = nil
- createOpts := createOptsFromSpec(hetznerCluster)
+ createOpts, err := createOptsFromSpec(hetznerCluster)
+ Expect(err).To(BeNil())
// ignore random name
createOpts.Name = ""
@@ -131,7 +134,8 @@ var _ = Describe("createOptsFromSpec", func() {
})
It("creates specs for cluster with network", func() {
- createOpts := createOptsFromSpec(hetznerCluster)
+ createOpts, err := createOptsFromSpec(hetznerCluster)
+ Expect(err).To(BeNil())
// ignore random name
createOpts.Name = ""
@@ -142,7 +146,8 @@ var _ = Describe("createOptsFromSpec", func() {
It("creates specs for cluster without load balancer name set", func() {
hetznerCluster.Spec.ControlPlaneLoadBalancer.Name = nil
- createOpts := createOptsFromSpec(hetznerCluster)
+ createOpts, err := createOptsFromSpec(hetznerCluster)
+ Expect(err).To(BeNil())
// should generate correct name
Expect(createOpts.Name).To(HavePrefix("hetzner-cluster-kube-apiserver-"))
@@ -152,4 +157,11 @@ var _ = Describe("createOptsFromSpec", func() {
wantCreateOpts.Name = ""
Expect(createOpts).To(Equal(wantCreateOpts))
})
+
+ It("returns ErrControlPlaneEndpointNotSet", func() {
+ hetznerCluster.Spec.ControlPlaneEndpoint = nil
+
+ _, err := createOptsFromSpec(hetznerCluster)
+ Expect(errors.Is(err, ErrControlPlaneEndpointNotSet)).To(BeTrue())
+ })
})
diff --git a/pkg/services/hcloud/mockedsshclient/fakesshclient.go b/pkg/services/hcloud/mockedsshclient/fakesshclient.go
new file mode 100644
index 000000000..3e3a9690e
--- /dev/null
+++ b/pkg/services/hcloud/mockedsshclient/fakesshclient.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mockedsshclient implements functions to create mocked ssh clients for hcloud testing.
+package mockedsshclient
+
+import (
+ sshmock "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/mocks/ssh"
+ sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
+)
+
+type mockedSSHClientFactory struct {
+ sshMockClient *sshmock.Client
+}
+
+func (f *mockedSSHClientFactory) NewClient(_ sshclient.Input) sshclient.Client {
+ return f.sshMockClient
+}
+
+// NewSSHFactory creates a new factory for SSH clients.
+func NewSSHFactory(sshMockClient *sshmock.Client) sshclient.Factory {
+ return &mockedSSHClientFactory{
+ sshMockClient: sshMockClient,
+ }
+}
diff --git a/pkg/services/hcloud/remediation/remediation.go b/pkg/services/hcloud/remediation/remediation.go
index 6b84830a2..aaead6c81 100644
--- a/pkg/services/hcloud/remediation/remediation.go
+++ b/pkg/services/hcloud/remediation/remediation.go
@@ -48,12 +48,16 @@ func NewService(scope *scope.HCloudRemediationScope) *Service {
// Reconcile implements reconcilement of HCloudRemediation.
func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err error) {
- server, err := s.findServer(ctx)
- if err != nil {
- return reconcile.Result{}, fmt.Errorf("failed to find the server of unhealthy machine: %w", err)
+ var server *hcloud.Server
+ if s.scope.HCloudMachine.Spec.ProviderID != nil {
+ server, err = s.findServer(ctx)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("failed to find the server of unhealthy machine: %w", err)
+ }
}
- // stop remediation if server does not exist
+ // stop remediation if server does not exist or ProviderID is nil (in this case the server
+ // cannot exist).
if server == nil {
s.scope.HCloudRemediation.Status.Phase = infrav1.PhaseDeleting
@@ -61,7 +65,14 @@ func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err erro
record.Warn(s.scope.HCloudRemediation, "FailedSettingConditionOnMachine", err.Error())
return reconcile.Result{}, fmt.Errorf("failed to set conditions on CAPI machine: %w", err)
}
- record.Warn(s.scope.HCloudRemediation, "ExitRemediation", "exit remediation because bare metal server does not exist")
+ providerID := "nil"
+ if s.scope.HCloudMachine.Spec.ProviderID != nil {
+ providerID = *s.scope.HCloudMachine.Spec.ProviderID
+ }
+ msg := fmt.Sprintf("exit remediation because hcloud server (providerID=%s) does not exist",
+ providerID)
+ s.scope.Logger.Error(nil, msg)
+ record.Warn(s.scope.HCloudRemediation, "ExitRemediation", msg)
return res, nil
}
diff --git a/pkg/services/hcloud/server/server.go b/pkg/services/hcloud/server/server.go
index a7e4b5964..00fd0ecd6 100644
--- a/pkg/services/hcloud/server/server.go
+++ b/pkg/services/hcloud/server/server.go
@@ -21,33 +21,44 @@ import (
"context"
"errors"
"fmt"
+ "net"
+ "syscall"
"time"
"github.com/hetznercloud/hcloud-go/v2/hcloud"
corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
- capierrors "sigs.k8s.io/cluster-api/errors"
+ capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck // we will handle that, when we update to capi v1.11
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/record"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
"github.com/syself/cluster-api-provider-hetzner/pkg/scope"
+ secretutil "github.com/syself/cluster-api-provider-hetzner/pkg/secrets"
+ sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
hcloudutil "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/util"
"github.com/syself/cluster-api-provider-hetzner/pkg/utils"
)
const (
serverOffTimeout = 10 * time.Minute
-)
-var (
- errWrongLabel = fmt.Errorf("label is wrong")
- errMissingLabel = fmt.Errorf("label is missing")
- errServerCreateNotPossible = fmt.Errorf("server create not possible - need action")
+ // requeueImmediately gets used to requeue "now". One second gets used to make
+ // it unlikely that the next Reconcile reads stale data from the local cache.
+ requeueImmediately = 1 * time.Second
+
+ actionDone = -1
+
+ preRescueOSImage = "ubuntu-24.04"
)
+var errServerCreateNotPossible = fmt.Errorf("server create not possible - need action")
+
// Service defines struct with machine scope to reconcile HCloudMachines.
type Service struct {
scope *scope.MachineScope
@@ -90,69 +101,587 @@ func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err erro
conditions.MarkTrue(s.scope.HCloudMachine, infrav1.BootstrapReadyCondition)
- // try to find an existing server
- server, err := s.findServer(ctx)
+ var server *hcloud.Server
+
+ if s.scope.HCloudMachine.Spec.ProviderID != nil {
+ server, err = s.findServer(ctx)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("findServer: %w", err)
+ }
+
+ // findServer will return both server and error as nil, if the server was not found.
+ if server == nil {
+ // The server did disappear in HCloud? Maybe it was delete via web-UI.
+ // We set MachineError. CAPI will delete machine.
+ msg := fmt.Sprintf("hcloud server (%q) no longer available. Setting MachineError.",
+ *s.scope.HCloudMachine.Spec.ProviderID)
+
+ s.scope.Logger.Error(errors.New(msg), msg,
+ "ProviderID", *s.scope.HCloudMachine.Spec.ProviderID,
+ "BootState", s.scope.HCloudMachine.Status.BootState,
+ "BootStateSince", s.scope.HCloudMachine.Status.BootStateSince,
+ )
+
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ s.scope.HCloudMachine.SetBootState(infrav1.HCloudBootStateUnset)
+ record.Warn(s.scope.HCloudMachine, "NoHCloudServerFound", msg)
+ conditions.MarkFalse(s.scope.HCloudMachine, infrav1.ServerAvailableCondition,
+ "NoHCloudServerFound", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ // no need to requeue.
+ return reconcile.Result{}, nil
+ }
+ }
+
+ switch s.scope.HCloudMachine.Status.BootState {
+ case infrav1.HCloudBootStateUnset:
+ return s.handleBootStateUnset(ctx)
+ case infrav1.HCloudBootStateInitializing:
+ return s.handleBootStateInitializing(ctx, server)
+ case infrav1.HCloudBootStateEnablingRescue:
+ return s.handleBootStateEnablingRescue(ctx, server)
+ case infrav1.HCloudBootStateBootingToRescue:
+ return s.handleBootStateBootingToRescue(ctx, server)
+ case infrav1.HCloudBootStateRunningImageCommand:
+ return s.handleBootStateRunningImageCommand(ctx, server)
+ case infrav1.HCloudBootStateBootingToRealOS:
+ return s.handleBootingToRealOS(ctx, server)
+ case infrav1.HCloudBootStateOperatingSystemRunning:
+ return s.handleOperatingSystemRunning(ctx, server)
+ default:
+ return reconcile.Result{}, fmt.Errorf("unknown BootState: %s", s.scope.HCloudMachine.Status.BootState)
+ }
+}
+
+// handleBootStateUnset is first state for both ways (imageName/snapshot and imageURL).
+func (s *Service) handleBootStateUnset(ctx context.Context) (reconcile.Result, error) {
+ hm := s.scope.HCloudMachine
+
+ if hm.Status.BootStateSince.IsZero() {
+ hm.Status.BootStateSince = metav1.Now()
+ }
+
+ durationOfState := time.Since(hm.Status.BootStateSince.Time)
+ if durationOfState > 6*time.Minute {
+ // timeout. Something has failed.
+ msg := fmt.Sprintf("handleBootStateUnset timed out after %s. Deleting machine",
+ durationOfState.Round(time.Second).String())
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "HandleBootStateUnsetTimedOut", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ if hm.Spec.ProviderID != nil && *hm.Spec.ProviderID != "" && hm.Spec.ImageURL == "" {
+ // This machine seems to be an existing machine which was created before introducing
+ // Status.BootState.
+
+ var msg string
+ if !hm.Status.Ready {
+ hm.SetBootState(infrav1.HCloudBootStateBootingToRealOS)
+ } else {
+ hm.SetBootState(infrav1.HCloudBootStateOperatingSystemRunning)
+ }
+ msg = fmt.Sprintf("Updating old resource (pre BootState) %s", hm.Status.BootState)
+
+ s.scope.Logger.Info(msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "HandleBootStateUnset", clusterv1.ConditionSeverityInfo,
+ "%s", msg)
+ return reconcile.Result{RequeueAfter: requeueImmediately}, nil
+ }
+
+ server, image, err := s.createServerFromImageNameOrURL(ctx)
if err != nil {
- return reconcile.Result{}, fmt.Errorf("failed to get server: %w", err)
+ if errors.Is(err, errServerCreateNotPossible) {
+ err = fmt.Errorf("createServerFromImageNameOrURL failed: %w", err)
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "ServerCreateNotPossible", clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
+ }
+ return reconcile.Result{}, fmt.Errorf("failed to create server: %w", err)
}
- // if no server is found we have to create one
- if server == nil {
- server, err = s.createServer(ctx)
+ updateHCloudMachineStatusFromServer(hm, server)
+
+ s.scope.SetProviderID(server.ID)
+
+ // Provisioning from a hcloud image like ubuntu-YY.MM takes roughly 11 seconds.
+ // Provisioning from a snapshot takes roughly 140 seconds.
+ // We do not want to do too many api-calls (rate-limiting). So we differentiate
+ // between both cases.
+ // These values get only used **once** after the server got created.
+
+ requeueAfter := 140 * time.Second
+ if image.RapidDeploy {
+ requeueAfter = 10 * time.Second
+ }
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "ProvisioningServer", clusterv1.ConditionSeverityInfo,
+ "Provisioning and rebooting server")
+ return reconcile.Result{RequeueAfter: requeueAfter}, nil
+}
+
+// handleBootStateInitializing is for provisioning with imageURL and image-url-command.
+func (s *Service) handleBootStateInitializing(ctx context.Context, server *hcloud.Server) (res reconcile.Result, reterr error) {
+ hm := s.scope.HCloudMachine
+
+ durationOfState := time.Since(hm.Status.BootStateSince.Time)
+ if durationOfState > 6*time.Minute {
+ // timeout. Something has failed.
+ msg := fmt.Sprintf("handleBootStateInitializing timed out after %s. Deleting machine",
+ durationOfState.Round(time.Second).String())
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "BootStateInitializingTimedOut", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ updateHCloudMachineStatusFromServer(hm, server)
+
+ // This is a new machine with imageURL. Do some pre-flight checks.
+ if s.scope.ImageURLCommand == "" {
+ msg := "imageURL is set, but the caph command is missing the --hcloud-image-url-command"
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(s.scope.HCloudMachine, infrav1.ServerAvailableCondition,
+ "ImageURLSetButNoCommandProvided", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ // No need for Requeue, because adding the command line argument to the caph deployment,
+ // will restart the controller, and all resources will be reconciled.
+ return reconcile.Result{}, nil
+ }
+
+ // Check that we have valid ssh-private-key in the secret. A failure could also mean there is a
+ // network failure while trying to access the api-server.
+ _, err := s.getSSHPrivateKey(ctx)
+ if err != nil {
+ err = fmt.Errorf("getSSHPrivateKey failed: %w", err)
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(s.scope.HCloudMachine, infrav1.ServerAvailableCondition,
+ "GetSSHPrivateKeyFailed", clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return reconcile.Result{RequeueAfter: 1 * time.Minute}, nil
+ }
+
+ // end of pre-flight checks.
+
+ // analyze status of server
+ switch server.Status {
+ case hcloud.ServerStatusStarting, hcloud.ServerStatusInitializing:
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "ServerNotRunningYet", clusterv1.ConditionSeverityInfo,
+ "hcloud server is %q", server.Status)
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ case hcloud.ServerStatusRunning:
+ // execute below code
+ default:
+ // some temporary status
+ s.scope.Logger.Info("Unknown hcloud server status", "status", server.Status)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "UnknownHCloudStatus", clusterv1.ConditionSeverityInfo, "hcloud server has unknown status: %q", server.Status)
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+
+ // Server is Running.
+
+ _, hcloudSSHKeys, err := s.getSSHKeys(ctx)
+ if err != nil {
+ return res, fmt.Errorf("getSSHKeys failed: %w", err)
+ }
+
+ rescueOpts := &hcloud.ServerEnableRescueOpts{
+ Type: hcloud.ServerRescueTypeLinux64,
+ SSHKeys: hcloudSSHKeys,
+ }
+ result, err := s.scope.HCloudClient.EnableRescueSystem(ctx, server, rescueOpts)
+ if err != nil {
+ return res, fmt.Errorf("EnableRescueSystem failed: %w", err)
+ }
+
+ // The API of hetzner is async. We get an Action-ID as result. We need to wait until the action
+ // is done. After that we can trigger the reboot, so that the machine boots into the rescue
+ // system.
+ hm.Status.ExternalIDs.ActionIDEnableRescueSystem = result.Action.ID
+
+ hm.SetBootState(infrav1.HCloudBootStateEnablingRescue)
+
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "WaitForRescueSystem", clusterv1.ConditionSeverityInfo,
+ "waiting for rescue system to be enabled")
+ return reconcile.Result{RequeueAfter: 4 * time.Second}, nil
+}
+
+// handleBootStateEnablingRescue is for provisioning with imageURL and image-url-command.
+func (s *Service) handleBootStateEnablingRescue(ctx context.Context, server *hcloud.Server) (reconcile.Result, error) {
+ hm := s.scope.HCloudMachine
+
+ durationOfState := time.Since(hm.Status.BootStateSince.Time)
+ if durationOfState > 6*time.Minute {
+ // timeout. Something has failed.
+ msg := fmt.Sprintf("handleBootStateEnablingRescue timed out after %s. Deleting machine",
+ durationOfState.Round(time.Second).String())
+ s.scope.Logger.Error(nil, msg)
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "EnablingRescueTimedOut", clusterv1.ConditionSeverityWarning, "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ updateHCloudMachineStatusFromServer(hm, server)
+
+ if hm.Status.ExternalIDs.ActionIDEnableRescueSystem == 0 {
+ msg := "handleBootStateEnablingRescue ActionIdEnableRescueSystem not set? Can not continue. Provisioning Failed"
+ s.scope.Logger.Error(nil, msg)
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "ActionIDForEnablingRescueSystemNotSet", clusterv1.ConditionSeverityWarning, "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ if hm.Status.ExternalIDs.ActionIDEnableRescueSystem != actionDone {
+ action, err := s.scope.HCloudClient.GetAction(ctx, hm.Status.ExternalIDs.ActionIDEnableRescueSystem)
if err != nil {
- if errors.Is(err, errServerCreateNotPossible) {
- return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
- }
- return reconcile.Result{}, fmt.Errorf("failed to create server: %w", err)
+ // If this error persists, then the BootState will time out, and a new
+ // machine will be created.
+ err = fmt.Errorf("GetAction failed: %w", err)
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "EnablingRescueGetActionFailed", clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return reconcile.Result{}, err
}
+
+ if action.Finished.IsZero() {
+ // not finished yet.
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "WaitingForEnablingRescueAction", clusterv1.ConditionSeverityInfo,
+ "Waiting until Action RescueEnabled is finished")
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+
+ err = action.Error()
+ if err != nil {
+ err = fmt.Errorf("action %+v failed (wait for rescue enabled): %w", action, err)
+ s.scope.Logger.Error(err, "")
+ s.scope.SetError(err.Error(), capierrors.CreateMachineError)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "EnablingRescueActionFailed", clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return reconcile.Result{}, nil
+ }
+
+ s.scope.Logger.Info("Action RescueEnabled is finished",
+ "actionDuration", action.Finished.Sub(action.Started),
+ "finishedSince", time.Since(action.Finished),
+ "actionStatus", action.Status)
+
+ hm.Status.ExternalIDs.ActionIDEnableRescueSystem = actionDone
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "EnablingRescueActionDone", clusterv1.ConditionSeverityInfo,
+ "Action RescueEnabled is finished")
+ // When the reboot is triggered immediately after the action is finished,
+ // then the reboot might get ignored.
+ return reconcile.Result{RequeueAfter: 4 * time.Second}, nil
+ }
+
+ if !server.RescueEnabled {
+ msg := "rescue system is not enabled yet? Requeue"
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "RescueNotEnabledYet", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
}
- s.scope.SetProviderID(server.ID)
+ // Now we know that the rescue-system was enabled. Up to now the PreRescueOS is running. Next
+ // step is to reboot the server into the rescue system.
- // update HCloudMachineStatus
- c := s.scope.HCloudMachine.Status.Conditions.DeepCopy()
- sshKeys := s.scope.HCloudMachine.Status.SSHKeys
- s.scope.HCloudMachine.Status = statusFromHCloudServer(server)
- s.scope.SetRegion(failureDomain)
- s.scope.HCloudMachine.Status.Conditions = c
- s.scope.HCloudMachine.Status.SSHKeys = sshKeys
+ // Reboot via ssh, avoid API calls to hcloud (rate-limit)
+ sshClient, err := s.getSSHClient(ctx)
+ if err != nil {
+ err = fmt.Errorf("getSSHClient failed: %w", err)
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "GetSSHClientFailed", clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
- // validate labels
- if err := validateLabels(server, s.createLabels()); err != nil {
- err := fmt.Errorf("could not validate labels of HCloud server: %w", err)
- s.scope.SetError(err.Error(), capierrors.CreateMachineError)
- return res, nil
+ err = sshClient.Reboot().Err
+ if err != nil {
+ if errors.Is(err, syscall.ECONNREFUSED) {
+ // ssh connection refused is common while the rescue system starts.
+ // Provide a nice message.
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "RetryingSSHConnection",
+ clusterv1.ConditionSeverityInfo, "Rebooting")
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+
+ err = fmt.Errorf("reboot to rescue: reboot via ssh failed: %w", err)
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "RebootViaSSHFailed",
+ clusterv1.ConditionSeverityWarning, "%s", err.Error())
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+
+ s.scope.Logger.Info("Reboot started (via ssh)")
+
+ hm.SetBootState(infrav1.HCloudBootStateBootingToRescue)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "BootingToRescue", clusterv1.ConditionSeverityInfo,
+ "reboot to rescue started")
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+}
+
+// handleBootStateBootingToRescue is for provisioning with imageURL and image-url-command.
+func (s *Service) handleBootStateBootingToRescue(ctx context.Context, server *hcloud.Server) (reconcile.Result, error) {
+ hm := s.scope.HCloudMachine
+ updateHCloudMachineStatusFromServer(hm, server)
+
+ durationOfState := time.Since(hm.Status.BootStateSince.Time)
+ if durationOfState > 6*time.Minute {
+ // timeout. Something has failed.
+ msg := fmt.Sprintf("reaching rescue system has timed out after %s. Deleting machine",
+ durationOfState.Round(time.Second).String())
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "BootingToRescueTimedOut", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ if server.RescueEnabled {
+ msg := "Waiting until RescueEnabled is false"
+ s.scope.Logger.Info(msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "WaitForRescueEnabledToBeFalse", clusterv1.ConditionSeverityInfo,
+ "%s", msg)
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+
+ sshClient, err := s.getSSHClient(ctx)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("getSSHClient failed (waiting for rescue running): %w", err)
+ }
+
+ output := sshClient.GetHostName()
+ err = output.Err
+ if err != nil {
+ var msg string
+ if errors.Is(err, syscall.ECONNREFUSED) {
+ // This is common. Provide a nice message.
+ msg = "getHostName: ssh not reachable yet. Retrying"
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "RetryingSSHConnection", clusterv1.ConditionSeverityInfo,
+ "%s", msg)
+ return reconcile.Result{RequeueAfter: 5 * time.Second}, nil
+ }
+ err = fmt.Errorf("get hostname failed: %w", err)
+ s.scope.Logger.Error(err, "")
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "GetHostnameFailed", clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return reconcile.Result{RequeueAfter: 5 * time.Second}, nil
+ }
+
+ conditions.MarkTrue(hm, infrav1.ServerCreateSucceededCondition)
+
+ remoteHostName := output.String()
+
+ if remoteHostName != "rescue" {
+ msg := fmt.Sprintf("Remote hostname (via ssh) of hcloud server is %q. Expected 'rescue'. Deleting hcloud machine", remoteHostName)
+ s.scope.Logger.Error(nil, msg)
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "UnexpectedHostname", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ // Now we know that we are inside a rescue system.
+ // image-url-command has not started yet. Start it.
+
+ data, err := s.scope.GetRawBootstrapData(ctx)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("hcloud GetRawBootstrapData failed: %w", err)
+ }
+ exitStatus, stdoutStderr, err := sshClient.StartImageURLCommand(ctx, s.scope.ImageURLCommand, hm.Spec.ImageURL, data, s.scope.Name(), []string{"sda"})
+ if err != nil {
+ err := fmt.Errorf("StartImageURLCommand failed (retrying): %w", err)
+ // This could be a temporary network error. Retry.
+ s.scope.Logger.Error(err, "",
+ "ImageURLCommand", s.scope.ImageURLCommand,
+ "exitStatus", exitStatus,
+ "stdoutStderr", stdoutStderr)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "StartImageURLCommandFailed", clusterv1.ConditionSeverityWarning,
+ "%s", err.Error())
+ return reconcile.Result{}, err
+ }
+
+ if exitStatus != 0 {
+ msg := "StartImageURLCommand failed with non-zero exit status. Deleting machine"
+ s.scope.Logger.Error(nil, msg,
+ "ImageURLCommand", s.scope.ImageURLCommand,
+ "exitStatus", exitStatus,
+ "stdoutStderr", stdoutStderr)
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "StartImageURLCommandNoZeroExitCode", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "HCloudImageURLCommandRunning", clusterv1.ConditionSeverityInfo,
+ "hcloud-image-url-command running")
+ hm.SetBootState(infrav1.HCloudBootStateRunningImageCommand)
+ return reconcile.Result{RequeueAfter: 55 * time.Second}, nil
+}
+
+// handleBootStateRunningImageCommand is for provisioning with imageURL and image-url-command.
+func (s *Service) handleBootStateRunningImageCommand(ctx context.Context, server *hcloud.Server) (res reconcile.Result, err error) {
+ hm := s.scope.HCloudMachine
+ updateHCloudMachineStatusFromServer(hm, server)
+
+ hcloudSSHClient, err := s.getSSHClient(ctx)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("getSSHClient failed (wait for image-url-command): %w", err)
+ }
+
+ state, logFile, err := hcloudSSHClient.StateOfImageURLCommand()
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("StateOfImageURLCommand failed: %w", err)
+ }
+
+ durationOfState := time.Since(hm.Status.BootStateSince.Time)
+ // Please keep the number (7) in sync with the docstring of ImageURL.
+ if durationOfState > 7*time.Minute {
+ // timeout. Something has failed.
+ msg := fmt.Sprintf("ImageURLCommand timed out after %s. Deleting machine",
+ durationOfState.Round(time.Second).String())
+ err = errors.New(msg)
+ s.scope.Logger.Error(err, "", "logFile", logFile)
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ record.Warn(hm, "ImageURLCommandFailed", logFile)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "RunningImageCommandTimedOut", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
+ }
+
+ switch state {
+ case sshclient.ImageURLCommandStateRunning:
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "HCloudImageURLCommandRunning", clusterv1.ConditionSeverityInfo,
+ "hcloud-image-url-command running")
+ return reconcile.Result{RequeueAfter: 5 * time.Second}, nil
+
+ case sshclient.ImageURLCommandStateFinishedSuccessfully:
+ record.Event(hm, "ImageURLCommandSuccessful", logFile)
+
+ // The image got installed. Now reboot in the real operating system.
+ if hcloudSSHClient.Reboot().Err != nil {
+ return reconcile.Result{}, fmt.Errorf("reboot after ImageURLCommand failed: %w",
+ err)
+ }
+
+ hm.SetBootState(infrav1.HCloudBootStateBootingToRealOS)
+
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "BootingToRealOS", clusterv1.ConditionSeverityInfo,
+ "Operating system of node is booting")
+
+ return reconcile.Result{RequeueAfter: requeueImmediately}, nil
+
+ case sshclient.ImageURLCommandStateFailed:
+ msg := "ImageURLCommand failed. Deleting machine"
+ err = errors.New(msg)
+ s.scope.Logger.Error(err, "", "logFile", logFile)
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "ImageCommandFailed", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
+
+ case sshclient.ImageURLCommandStateNotStarted:
+ return reconcile.Result{}, fmt.Errorf("image-url-command not started in BootState %q? Should not happen",
+ state)
+
+ default:
+ return reconcile.Result{}, fmt.Errorf("unknown ImageURLCommandState: %q", state)
+ }
+}
+
+// handleBootingToRealOS is used for both ways (imageName/snapshot and imageURL).
+func (s *Service) handleBootingToRealOS(ctx context.Context, server *hcloud.Server) (res reconcile.Result, err error) {
+ hm := s.scope.HCloudMachine
+ updateHCloudMachineStatusFromServer(hm, server)
+
+ durationOfState := time.Since(hm.Status.BootStateSince.Time)
+ if durationOfState > 6*time.Minute {
+ // timeout. Something has failed.
+ msg := fmt.Sprintf("handleBootingToRealOS timed out after %s. Deleting machine",
+ durationOfState.Round(time.Second).String())
+ s.scope.SetError(msg, capierrors.CreateMachineError)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "BootingToRealOSTimedOut", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return reconcile.Result{}, nil
}
// analyze status of server
switch server.Status {
case hcloud.ServerStatusOff:
return s.handleServerStatusOff(ctx, server)
- case hcloud.ServerStatusStarting:
- // Requeue here so that server does not switch back and forth between off and starting.
- // If we don't return here, the condition ServerAvailable would get marked as true in this
- // case. However, if the server is stuck and does not power on, we should not mark the
- // condition ServerAvailable as true to be able to remediate the server after a timeout.
- conditions.MarkFalse(
- s.scope.HCloudMachine,
- infrav1.ServerAvailableCondition,
- infrav1.ServerStartingReason,
- clusterv1.ConditionSeverityInfo,
- "server is starting",
- )
- return reconcile.Result{RequeueAfter: 1 * time.Minute}, nil
- case hcloud.ServerStatusRunning: // do nothing
+
+ case hcloud.ServerStatusStarting, hcloud.ServerStatusInitializing:
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "BootingToRealOS", clusterv1.ConditionSeverityInfo,
+ "Operating system of node is booting")
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+
+ case hcloud.ServerStatusRunning:
+ hm.SetBootState(infrav1.HCloudBootStateOperatingSystemRunning)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "RealOSRunning", clusterv1.ConditionSeverityInfo,
+ "hcloud server status: %s", server.Status)
+ // Show changes in Status and go to next BootState.
+ return reconcile.Result{RequeueAfter: requeueImmediately}, nil
+
default:
- // some temporary status
- s.scope.SetReady(false)
+ msg := fmt.Sprintf("hcloud server status unknown: %s", server.Status)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "UnknownServerStatus", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
}
+}
+
+// handleOperatingSystemRunning is the final state. It is used for both ways (imageName/snapshot and imageURL).
+func (s *Service) handleOperatingSystemRunning(ctx context.Context, server *hcloud.Server) (res reconcile.Result, err error) {
+ hm := s.scope.HCloudMachine
+ updateHCloudMachineStatusFromServer(hm, server)
+
+ // Clean up old Status fields
+ hm.Status.ExternalIDs.ActionIDEnableRescueSystem = 0
// check whether server is attached to the network
if err := s.reconcileNetworkAttachment(ctx, server); err != nil {
reterr := fmt.Errorf("failed to reconcile network attachment: %w", err)
conditions.MarkFalse(
- s.scope.HCloudMachine,
+ hm,
infrav1.ServerAvailableCondition,
infrav1.NetworkAttachFailedReason,
clusterv1.ConditionSeverityError,
@@ -164,7 +693,7 @@ func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err erro
// nothing to do any more for worker nodes
if !s.scope.IsControlPlane() {
- conditions.MarkTrue(s.scope.HCloudMachine, infrav1.ServerAvailableCondition)
+ conditions.MarkTrue(hm, infrav1.ServerAvailableCondition)
s.scope.SetReady(true)
return res, nil
}
@@ -174,7 +703,7 @@ func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err erro
if err != nil {
reterr := fmt.Errorf("failed to reconcile load balancer attachment: %w", err)
conditions.MarkFalse(
- s.scope.HCloudMachine,
+ hm,
infrav1.ServerAvailableCondition,
infrav1.LoadBalancerAttachFailedReason,
clusterv1.ConditionSeverityError,
@@ -185,9 +714,8 @@ func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err erro
}
s.scope.SetReady(true)
- conditions.MarkTrue(s.scope.HCloudMachine, infrav1.ServerAvailableCondition)
-
- return res, nil
+ conditions.MarkTrue(hm, infrav1.ServerAvailableCondition)
+ return reconcile.Result{}, nil
}
// implements setting rate limit on hcloudmachine.
@@ -216,9 +744,13 @@ func (s *Service) Delete(ctx context.Context) (res reconcile.Result, err error)
// if no server has been found, then nothing can be deleted
if server == nil {
- msg := fmt.Sprintf("Unable to delete HCloud server. Could not find matching server for %s", s.scope.Name())
+ providerID := "nil"
+ if s.scope.HCloudMachine.Spec.ProviderID != nil {
+ providerID = *s.scope.HCloudMachine.Spec.ProviderID
+ }
+ msg := fmt.Sprintf("Unable to delete HCloud server. Could not find matching server for %s. ProviderID: %q", s.scope.Name(), providerID)
s.scope.V(1).Info(msg)
- record.Warnf(s.scope.HCloudMachine, "NoInstanceFound", msg)
+ record.Warn(s.scope.HCloudMachine, "NoInstanceFound", msg)
return res, nil
}
@@ -273,6 +805,8 @@ func (s *Service) reconcileNetworkAttachment(ctx context.Context, server *hcloud
}
func (s *Service) reconcileLoadBalancerAttachment(ctx context.Context, server *hcloud.Server) (reconcile.Result, error) {
+ hm := s.scope.HCloudMachine
+
if s.scope.HetznerCluster.Status.ControlPlaneLoadBalancer == nil {
return reconcile.Result{}, nil
}
@@ -314,6 +848,9 @@ func (s *Service) reconcileLoadBalancerAttachment(ctx context.Context, server *h
// we attach only nodes with kube-apiserver pod healthy to avoid downtime, skipped for the first node
if len(s.scope.HetznerCluster.Status.ControlPlaneLoadBalancer.Target) > 0 && !apiServerPodHealthy {
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "WaitingForAPIServer", clusterv1.ConditionSeverityInfo,
+ "reconcile LoadBalancer: apiserver pod not healthy yet.")
return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
}
@@ -342,23 +879,75 @@ func (s *Service) reconcileLoadBalancerAttachment(ctx context.Context, server *h
return reconcile.Result{}, nil
}
-func (s *Service) createServer(ctx context.Context) (*hcloud.Server, error) {
- // get userData
+func (s *Service) createServerFromImageNameOrURL(ctx context.Context) (*hcloud.Server, *hcloud.Image, error) {
+ if s.scope.HCloudMachine.Spec.ImageName != "" {
+ return s.createServerFromImageName(ctx)
+ }
+ return s.createServerFromImageURL(ctx)
+}
+
+func (s *Service) createServerFromImageURL(ctx context.Context) (*hcloud.Server, *hcloud.Image, error) {
+ // Validate that ImageURLCommand is given
+ hm := s.scope.HCloudMachine
+
+ image, err := s.getServerImage(ctx, preRescueOSImage)
+ if err != nil {
+ err = fmt.Errorf("failed to get pre-rescue-OS server image %q: %w", preRescueOSImage, err)
+ msg := err.Error()
+ record.Warn(hm, "FailedGetServerImage", msg)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "GetServerImageFailed", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return nil, nil, err
+ }
+
+ server, err := s.createServer(ctx, nil, image)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ s.scope.HCloudMachine.SetBootState(infrav1.HCloudBootStateInitializing)
+ return server, image, nil
+}
+
+func (s *Service) createServerFromImageName(ctx context.Context) (*hcloud.Server, *hcloud.Image, error) {
+ hm := s.scope.HCloudMachine
userData, err := s.scope.GetRawBootstrapData(ctx)
if err != nil {
- record.Warnf(
- s.scope.HCloudMachine,
- "FailedGetBootstrapData",
- err.Error(),
- )
- return nil, fmt.Errorf("failed to get raw bootstrap data: %s", err)
+ err = fmt.Errorf("failed to get raw bootstrap data: %s", err)
+ msg := err.Error()
+ record.Warn(hm, "FailedGetBootstrapData", msg)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "GetRawBootstrapDataFailed", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return nil, nil, err
}
- image, err := s.getServerImage(ctx)
+ image, err := s.getServerImage(ctx, hm.Spec.ImageName)
if err != nil {
- return nil, fmt.Errorf("failed to get server image: %w", err)
+ err = fmt.Errorf("create server from imageName (%q): %w", hm.Spec.ImageName, err)
+ msg := err.Error()
+ record.Warn(hm, "FailedGetServerImage", msg)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(hm, infrav1.ServerAvailableCondition,
+ "GetServerImageFailed", clusterv1.ConditionSeverityWarning,
+ "%s", msg)
+ return nil, nil, err
}
+ server, err := s.createServer(ctx, userData, image)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ hm.SetBootState(infrav1.HCloudBootStateBootingToRealOS)
+ return server, image, nil
+}
+
+func (s *Service) createServer(ctx context.Context, userData []byte, image *hcloud.Image) (*hcloud.Server, error) {
+ hm := s.scope.HCloudMachine
automount := false
startAfterCreate := true
opts := hcloud.ServerCreateOpts{
@@ -366,25 +955,25 @@ func (s *Service) createServer(ctx context.Context) (*hcloud.Server, error) {
Labels: s.createLabels(),
Image: image,
Location: &hcloud.Location{
- Name: string(s.scope.HCloudMachine.Status.Region),
+ Name: string(hm.Status.Region),
},
ServerType: &hcloud.ServerType{
- Name: string(s.scope.HCloudMachine.Spec.Type),
+ Name: string(hm.Spec.Type),
},
Automount: &automount,
StartAfterCreate: &startAfterCreate,
UserData: string(userData),
PublicNet: &hcloud.ServerCreatePublicNet{
- EnableIPv4: s.scope.HCloudMachine.Spec.PublicNetwork.EnableIPv4,
- EnableIPv6: s.scope.HCloudMachine.Spec.PublicNetwork.EnableIPv6,
+ EnableIPv4: hm.Spec.PublicNetwork.EnableIPv4,
+ EnableIPv6: hm.Spec.PublicNetwork.EnableIPv6,
},
}
// set placement group if necessary
- if s.scope.HCloudMachine.Spec.PlacementGroupName != nil {
+ if hm.Spec.PlacementGroupName != nil {
var foundPlacementGroupInStatus bool
for _, pgSts := range s.scope.HetznerCluster.Status.HCloudPlacementGroups {
- if *s.scope.HCloudMachine.Spec.PlacementGroupName == pgSts.Name {
+ if *hm.Spec.PlacementGroupName == pgSts.Name {
foundPlacementGroupInStatus = true
opts.PlacementGroup = &hcloud.PlacementGroup{
ID: pgSts.ID,
@@ -394,33 +983,93 @@ func (s *Service) createServer(ctx context.Context) (*hcloud.Server, error) {
}
}
if !foundPlacementGroupInStatus {
- conditions.MarkFalse(s.scope.HCloudMachine,
+ conditions.MarkFalse(hm,
infrav1.ServerCreateSucceededCondition,
infrav1.InstanceHasNonExistingPlacementGroupReason,
clusterv1.ConditionSeverityError,
"Placement group %q does not exist in cluster",
- *s.scope.HCloudMachine.Spec.PlacementGroupName,
+ *hm.Spec.PlacementGroupName,
)
return nil, errServerCreateNotPossible
}
}
- sshKeySpecs := s.scope.HCloudMachine.Spec.SSHKeys
+ caphSSHKeys, hcloudSSHKeys, err := s.getSSHKeys(ctx)
+ if err != nil {
+ return nil, err
+ }
+ opts.SSHKeys = hcloudSSHKeys
+
+ // set up network if available
+ if net := s.scope.HetznerCluster.Status.Network; net != nil {
+ opts.Networks = []*hcloud.Network{{
+ ID: net.ID,
+ }}
+ }
+
+ // if no private network exists, there must be an IPv4 for the load balancer
+ if !s.scope.HetznerCluster.Spec.HCloudNetwork.Enabled {
+ opts.PublicNet.EnableIPv4 = true
+ }
+
+ // Create the server
+ server, err := s.scope.HCloudClient.CreateServer(ctx, opts)
+ if err != nil {
+ if hcloudutil.HandleRateLimitExceeded(hm, err, "CreateServer") {
+ // RateLimit was reached. Condition and Event got already created.
+ return nil, fmt.Errorf("failed to create HCloud server %s: %w", hm.Name, err)
+ }
+ msg := fmt.Sprintf("failed to create HCloud server %s: %s", hm.Name, err.Error())
+ s.scope.Logger.Error(nil, msg)
+ // No condition was set yet. Set a general condition to false.
+ conditions.MarkFalse(hm, infrav1.ServerCreateSucceededCondition,
+ infrav1.ServerCreateFailedReason, clusterv1.ConditionSeverityWarning, "%s", msg)
+ record.Warn(hm, "FailedCreateHCloudServer", msg)
+ return nil, handleRateLimit(hm, err, "CreateServer", msg)
+ }
+
+ // set ssh keys to status
+ hm.Status.SSHKeys = caphSSHKeys
+
+ conditions.MarkTrue(hm, infrav1.ServerCreateSucceededCondition)
+ record.Eventf(hm, "SuccessfulCreate", "Created new server %s with ID %d", server.Name, server.ID)
+ return server, nil
+}
+
+// getSSHKeys collects the set of SSH keys to use when creating a server in Hetzner Cloud,
+// and validates that they exist in the HCloud API.
+//
+// The function:
+// 1. Starts with the SSH keys defined in HCloudMachine.Spec.SSHKeys.
+// If none are defined there, it falls back to HetznerCluster.Spec.SSHKeys.HCloud.
+// 2. Always adds the SSH key referenced in the Hetzner secret (if present),
+// ensuring it is included even if not listed in the spec.
+// 3. Fetches the complete list of SSH keys stored in HCloud via the API.
+// 4. Verifies that every SSH key referenced in the spec or secret exists in HCloud.
+// If any key is missing, it updates machine conditions and returns an error.
+// 5. Builds and returns two slices:
+// - caphSSHKeys: the logical set of SSH keys referenced in the spec/secret,
+// suitable for storing in the HCloudMachine status.
+// - hcloudSSHKeys: the corresponding HCloud API objects, suitable for passing
+// to the HCloud CreateServer API call.
+func (s *Service) getSSHKeys(ctx context.Context) (
+ caphSSHKeys []infrav1.SSHKey,
+ hcloudSSHKeys []*hcloud.SSHKey,
+ reterr error,
+) {
+ caphSSHKeys = s.scope.HCloudMachine.Spec.SSHKeys
// if no ssh keys are specified on the machine, take the ones from the cluster
- if len(sshKeySpecs) == 0 {
- sshKeySpecs = s.scope.HetznerCluster.Spec.SSHKeys.HCloud
+ if len(caphSSHKeys) == 0 {
+ caphSSHKeys = s.scope.HetznerCluster.Spec.SSHKeys.HCloud
}
// always add ssh key from secret if one is found
-
- // this code is redundant with a similar one on cluster level but is necessary if ClusterClass is used
- // as in ClusterClass we cannot store anything in HetznerCluster object
sshKeyName := s.scope.HetznerSecret().Data[s.scope.HetznerCluster.Spec.HetznerSecret.Key.SSHKey]
if len(sshKeyName) > 0 {
// Check if the SSH key name already exists
keyExists := false
- for _, key := range sshKeySpecs {
+ for _, key := range caphSSHKeys {
if string(sshKeyName) == key.Name {
keyExists = true
break
@@ -429,77 +1078,43 @@ func (s *Service) createServer(ctx context.Context) (*hcloud.Server, error) {
// If the SSH key name doesn't exist, append it
if !keyExists {
- sshKeySpecs = append(sshKeySpecs, infrav1.SSHKey{Name: string(sshKeyName)})
+ caphSSHKeys = append(caphSSHKeys, infrav1.SSHKey{Name: string(sshKeyName)})
}
}
// get all ssh keys that are stored in HCloud API
- sshKeysAPI, err := s.scope.HCloudClient.ListSSHKeys(ctx, hcloud.SSHKeyListOpts{})
+ allHcloudSSHKeys, err := s.scope.HCloudClient.ListSSHKeys(ctx, hcloud.SSHKeyListOpts{})
if err != nil {
- return nil, handleRateLimit(s.scope.HCloudMachine, err, "ListSSHKeys", "failed listing ssh keys from hcloud")
+ return nil, nil, handleRateLimit(s.scope.HCloudMachine, err, "ListSSHKeys", "failed listing ssh keys from hcloud")
}
- // find matching keys and store them
- opts.SSHKeys, err = filterHCloudSSHKeys(sshKeysAPI, sshKeySpecs)
- if err != nil {
- conditions.MarkFalse(
- s.scope.HCloudMachine,
- infrav1.ServerCreateSucceededCondition,
- infrav1.SSHKeyNotFoundReason,
- clusterv1.ConditionSeverityError,
- "%s",
- err.Error(),
- )
- return nil, errServerCreateNotPossible
- }
-
- // set up network if available
- if net := s.scope.HetznerCluster.Status.Network; net != nil {
- opts.Networks = []*hcloud.Network{{
- ID: net.ID,
- }}
+ // Create a map, so we can easily check if each caphSSHKey exist in HCloud.
+ sshKeysAPIMap := make(map[string]*hcloud.SSHKey, len(allHcloudSSHKeys))
+ for i, sshKey := range allHcloudSSHKeys {
+ sshKeysAPIMap[sshKey.Name] = allHcloudSSHKeys[i]
}
- // if no private network exists, there must be an IPv4 for the load balancer
- if !s.scope.HetznerCluster.Spec.HCloudNetwork.Enabled {
- opts.PublicNet.EnableIPv4 = true
- }
-
- // Create the server
- server, err := s.scope.HCloudClient.CreateServer(ctx, opts)
- if err != nil {
- if hcloudutil.HandleRateLimitExceeded(s.scope.HCloudMachine, err, "CreateServer") {
- // RateLimit was reached. Condition and Event got already created.
- return nil, fmt.Errorf("failed to create HCloud server %s: %w", s.scope.HCloudMachine.Name, err)
+ // Check caphSSHKeys. Fail if key is not in HCloud
+ for _, sshKeySpec := range caphSSHKeys {
+ sshKey, ok := sshKeysAPIMap[sshKeySpec.Name]
+ if !ok {
+ msg := fmt.Sprintf("ssh key %q not present in hcloud", sshKeySpec.Name)
+ s.scope.Logger.Error(nil, msg)
+ conditions.MarkFalse(
+ s.scope.HCloudMachine,
+ infrav1.ServerCreateSucceededCondition,
+ infrav1.SSHKeyNotFoundReason,
+ clusterv1.ConditionSeverityError,
+ "%s", msg)
+ return nil, nil, errServerCreateNotPossible
}
- // No condition was set yet. Set a general condition to false.
- conditions.MarkFalse(
- s.scope.HCloudMachine,
- infrav1.ServerCreateSucceededCondition,
- infrav1.ServerCreateFailedReason,
- clusterv1.ConditionSeverityWarning,
- "%s",
- err.Error(),
- )
- record.Warnf(s.scope.HCloudMachine,
- "FailedCreateHCloudServer",
- "Failed to create HCloud server %s: %s",
- s.scope.Name(),
- err,
- )
- errMsg := fmt.Sprintf("failed to create HCloud server %s", s.scope.HCloudMachine.Name)
- return nil, handleRateLimit(s.scope.HCloudMachine, err, "CreateServer", errMsg)
+ hcloudSSHKeys = append(hcloudSSHKeys, sshKey)
}
- // set ssh keys to status
- s.scope.HCloudMachine.Status.SSHKeys = sshKeySpecs
-
- conditions.MarkTrue(s.scope.HCloudMachine, infrav1.ServerCreateSucceededCondition)
- record.Eventf(s.scope.HCloudMachine, "SuccessfulCreate", "Created new server %s with ID %d", server.Name, server.ID)
- return server, nil
+ return caphSSHKeys, hcloudSSHKeys, nil
}
-func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
+func (s *Service) getServerImage(ctx context.Context, imageName string) (*hcloud.Image, error) {
key := fmt.Sprintf("%s%s", infrav1.NameHetznerProviderPrefix, "image-name")
// Get server type so we can filter for images with correct architecture
@@ -513,7 +1128,8 @@ func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
infrav1.ServerCreateSucceededCondition,
infrav1.ServerTypeNotFoundReason,
clusterv1.ConditionSeverityError,
- "failed to get server type - nil type",
+ "failed to get server type %q",
+ string(s.scope.HCloudMachine.Spec.Type),
)
return nil, errServerCreateNotPossible
}
@@ -522,7 +1138,7 @@ func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
// this is needed because snapshots don't have a name, only descriptions and labels
listOpts := hcloud.ImageListOpts{
ListOpts: hcloud.ListOpts{
- LabelSelector: fmt.Sprintf("%s==%s", key, s.scope.HCloudMachine.Spec.ImageName),
+ LabelSelector: fmt.Sprintf("%s==%s", key, imageName),
},
Architecture: []hcloud.Architecture{serverType.Architecture},
}
@@ -534,7 +1150,7 @@ func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
// query for an existing image by name.
listOpts = hcloud.ImageListOpts{
- Name: s.scope.HCloudMachine.Spec.ImageName,
+ Name: imageName,
Architecture: []hcloud.Architecture{serverType.Architecture},
}
imagesByName, err := s.scope.HCloudClient.ListImages(ctx, listOpts)
@@ -545,8 +1161,9 @@ func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
images = append(images, imagesByName...)
if len(images) > 1 {
- err := fmt.Errorf("image is ambiguous - %d images have name %s", len(images), s.scope.HCloudMachine.Spec.ImageName)
- record.Warnf(s.scope.HCloudMachine, "ImageNameAmbiguous", err.Error())
+ err := fmt.Errorf("image is ambiguous - %d images have name %s",
+ len(images), imageName)
+ record.Warn(s.scope.HCloudMachine, "ImageNameAmbiguous", err.Error())
conditions.MarkFalse(s.scope.HCloudMachine,
infrav1.ServerCreateSucceededCondition,
infrav1.ImageAmbiguousReason,
@@ -558,7 +1175,7 @@ func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
}
if len(images) == 0 {
err := fmt.Errorf("no image found with name %s", s.scope.HCloudMachine.Spec.ImageName)
- record.Warnf(s.scope.HCloudMachine, "ImageNotFound", err.Error())
+ record.Warn(s.scope.HCloudMachine, "ImageNotFound", err.Error())
conditions.MarkFalse(s.scope.HCloudMachine,
infrav1.ServerCreateSucceededCondition,
infrav1.ImageNotFoundReason,
@@ -586,6 +1203,9 @@ func (s *Service) handleServerStatusOff(ctx context.Context, server *hcloud.Serv
if err := s.scope.HCloudClient.PowerOnServer(ctx, server); err != nil {
if hcloud.IsError(err, hcloud.ErrorCodeLocked) {
// if server is locked, we just retry again
+ conditions.MarkFalse(s.scope.HCloudMachine, infrav1.ServerAvailableCondition,
+ "PowerOnServerFailed", clusterv1.ConditionSeverityInfo,
+ "handleServerStatusOff: server locked. Will retry")
return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
}
return reconcile.Result{}, handleRateLimit(s.scope.HCloudMachine, err, "PowerOnServer", "failed to power on server")
@@ -600,6 +1220,8 @@ func (s *Service) handleServerStatusOff(ctx context.Context, server *hcloud.Serv
if err := s.scope.HCloudClient.PowerOnServer(ctx, server); err != nil {
if hcloud.IsError(err, hcloud.ErrorCodeLocked) {
// if server is locked, we just retry again
+ conditions.MarkFalse(s.scope.HCloudMachine, infrav1.ServerAvailableCondition,
+ "PowerOnServerFailed", clusterv1.ConditionSeverityInfo, "handleServerStatusOff: server locked. Will retry")
return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
}
return reconcile.Result{}, handleRateLimit(s.scope.HCloudMachine, err, "PowerOnServer", "failed to power on server")
@@ -714,7 +1336,7 @@ func (s *Service) findServer(ctx context.Context) (*hcloud.Server, error) {
if len(servers) > 1 {
err := fmt.Errorf("found %d servers with name %s", len(servers), s.scope.Name())
- record.Warnf(s.scope.HCloudMachine, "MultipleInstances", err.Error())
+ record.Warn(s.scope.HCloudMachine, "MultipleInstances", err.Error())
return nil, err
}
@@ -722,26 +1344,12 @@ func (s *Service) findServer(ctx context.Context) (*hcloud.Server, error) {
return nil, nil
}
- return servers[0], nil
-}
+ s.scope.Logger.Info("DeprecationWarning finding Server by labels is no longer needed. We plan to remove that feature and rename findServer to getServer", "err", err)
-func validateLabels(server *hcloud.Server, labels map[string]string) error {
- for key, val := range labels {
- wantVal, found := server.Labels[key]
- if !found {
- return fmt.Errorf("did not find label with key %q: %w", key, errMissingLabel)
- }
- if wantVal != val {
- return fmt.Errorf("got %q, want %q: %w", val, wantVal, errWrongLabel)
- }
- }
- return nil
+ return servers[0], nil
}
-func statusFromHCloudServer(server *hcloud.Server) infrav1.HCloudMachineStatus {
- // set instance state
- instanceState := server.Status
-
+func statusAddresses(server *hcloud.Server) []clusterv1.MachineAddress {
// populate addresses
addresses := []clusterv1.MachineAddress{}
@@ -755,8 +1363,15 @@ func statusFromHCloudServer(server *hcloud.Server) infrav1.HCloudMachineStatus {
)
}
- if ip := server.PublicNet.IPv6.IP; ip.IsGlobalUnicast() {
+ if unicastIP := server.PublicNet.IPv6.IP; unicastIP.IsGlobalUnicast() {
+ // Create a copy. This is important, otherwise we modify the IP of `server`. This could lead
+ // to unexpected behaviour.
+ ip := append(net.IP(nil), unicastIP...)
+
+ // Hetzner returns the routed /64 base, increment last byte to obtain first usable address
+ // The local value gets changed, not the IP of `server`.
ip[15]++
+
addresses = append(
addresses,
clusterv1.MachineAddress{
@@ -776,10 +1391,7 @@ func statusFromHCloudServer(server *hcloud.Server) infrav1.HCloudMachineStatus {
)
}
- return infrav1.HCloudMachineStatus{
- InstanceState: &instanceState,
- Addresses: addresses,
- }
+ return addresses
}
func (s *Service) createLabels() map[string]string {
@@ -797,19 +1409,63 @@ func (s *Service) createLabels() map[string]string {
}
}
-func filterHCloudSSHKeys(sshKeysAPI []*hcloud.SSHKey, sshKeysSpec []infrav1.SSHKey) ([]*hcloud.SSHKey, error) {
- sshKeysAPIMap := make(map[string]*hcloud.SSHKey)
- for i, sshKey := range sshKeysAPI {
- sshKeysAPIMap[sshKey.Name] = sshKeysAPI[i]
+func updateHCloudMachineStatusFromServer(hm *infrav1.HCloudMachine, server *hcloud.Server) {
+ hm.Status.Addresses = statusAddresses(server)
+ hm.Status.InstanceState = ptr.To(server.Status)
+}
+
+func (s *Service) getSSHPrivateKey(ctx context.Context) (string, error) {
+ robotSecretName := s.scope.HetznerCluster.Spec.SSHKeys.RobotRescueSecretRef.Name
+ if robotSecretName == "" {
+ return "", errors.New("HetznerCluster.Spec.SSHKeys.RobotRescueSecretRef.Name is empty. Can not get ssh client")
}
- sshKeys := make([]*hcloud.SSHKey, len(sshKeysSpec))
- for i, sshKeySpec := range sshKeysSpec {
- sshKey, ok := sshKeysAPIMap[sshKeySpec.Name]
- if !ok {
- return nil, fmt.Errorf("ssh key not found in HCloud. SSH key name: %s", sshKeySpec.Name)
- }
- sshKeys[i] = sshKey
+ secretManager := secretutil.NewSecretManager(s.scope.Logger, s.scope.Client, s.scope.APIReader)
+
+ robotSecret, err := secretManager.ObtainSecret(ctx, types.NamespacedName{
+ Name: robotSecretName,
+ Namespace: s.scope.Namespace(),
+ })
+ if err != nil {
+ return "", fmt.Errorf("failed to get secret %q: %w", robotSecretName, err)
+ }
+
+ if robotSecret == nil {
+ return "", fmt.Errorf("failed to obtain secret %s/%s", s.scope.Namespace(), robotSecretName)
+ }
+
+ privateKey := string(robotSecret.Data[s.scope.HetznerCluster.Spec.SSHKeys.RobotRescueSecretRef.Key.PrivateKey])
+ if privateKey == "" {
+ return "", fmt.Errorf("key %q in secret %q is missing or empty. Failed to get ssh-private-key",
+ s.scope.HetznerCluster.Spec.SSHKeys.RobotRescueSecretRef.Key.PrivateKey,
+ robotSecretName)
}
- return sshKeys, nil
+
+ return privateKey, nil
+}
+
+// getSSHClient uses HetznerCluster.Spec.SSHKeys.RobotRescueSecretRef to get the ssh private key.
+// Then it creates a sshClient connected to the first IP of the HCloudMachine.
+func (s *Service) getSSHClient(ctx context.Context) (sshclient.Client, error) {
+ hm := s.scope.HCloudMachine
+
+ privateKey, err := s.getSSHPrivateKey(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("getSSHPrivateKey failed: %w", err)
+ }
+
+ if len(hm.Status.Addresses) == 0 {
+ // This should never happen.
+ return nil, errors.New("internal error: HCloudMachine.Status.Addresses empty. Can not connect via ssh")
+ }
+ ip := hm.Status.Addresses[0].Address
+
+ // Unfortunately the hcloud API does not provide the sshd hostkey of the rescue system.
+ // We need to trust the network. In theory a man-in-the-middle attack is possible.
+ hcloudSSHClient := s.scope.SSHClientFactory.NewClient(sshclient.Input{
+ IP: ip,
+ PrivateKey: privateKey,
+ Port: 22,
+ })
+ return hcloudSSHClient, nil
}
diff --git a/pkg/services/hcloud/server/server_suite_test.go b/pkg/services/hcloud/server/server_suite_test.go
index 7028e88a6..cb8109dac 100644
--- a/pkg/services/hcloud/server/server_suite_test.go
+++ b/pkg/services/hcloud/server/server_suite_test.go
@@ -25,11 +25,16 @@ import (
"github.com/hetznercloud/hcloud-go/v2/hcloud/schema"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/kubectl/pkg/scheme"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ ctrl "sigs.k8s.io/controller-runtime"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
"github.com/syself/cluster-api-provider-hetzner/pkg/scope"
hcloudclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client"
+ "github.com/syself/cluster-api-provider-hetzner/test/helpers"
)
const serverJSON = `
@@ -188,12 +193,10 @@ const serverJSON = `
"volumes": []
}`
-var server *hcloud.Server
-
-const instanceState = hcloud.ServerStatusRunning
-
-var ips = []string{"1.2.3.4", "2001:db8::3", "10.0.0.2"}
-var addressTypes = []clusterv1.MachineAddressType{clusterv1.MachineExternalIP, clusterv1.MachineExternalIP, clusterv1.MachineInternalIP}
+var (
+ testEnv *helpers.TestEnvironment
+ ctx = ctrl.SetupSignalHandler()
+)
func TestServer(t *testing.T) {
RegisterFailHandler(Fail)
@@ -201,14 +204,41 @@ func TestServer(t *testing.T) {
}
var _ = BeforeSuite(func() {
+ utilruntime.Must(corev1.AddToScheme(scheme.Scheme))
+ utilruntime.Must(infrav1.AddToScheme(scheme.Scheme))
+ utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
+
+ testEnv = helpers.NewTestEnvironment()
+
+ go func() {
+ defer GinkgoRecover()
+ Expect(testEnv.StartManager(ctx)).To(Succeed())
+ }()
+
+ <-testEnv.Manager.Elected()
+
+ // wait for webhook port to be open prior to running tests
+ testEnv.WaitForWebhooks()
+})
+
+var _ = AfterSuite(func() {
+ Expect(testEnv.Stop()).To(Succeed())
+})
+
+func newTestServer() *hcloud.Server {
var serverSchema schema.Server
b := []byte(serverJSON)
var buffer bytes.Buffer
- Expect(json.Compact(&buffer, b))
- Expect(json.Unmarshal(buffer.Bytes(), &serverSchema)).To(Succeed())
-
- server = hcloud.ServerFromSchema(serverSchema)
-})
+ err := json.Compact(&buffer, b)
+ if err != nil {
+ panic(err)
+ }
+ err = json.Unmarshal(buffer.Bytes(), &serverSchema)
+ if err != nil {
+ panic(err)
+ }
+ return hcloud.ServerFromSchema(serverSchema)
+}
func newTestService(hcloudMachine *infrav1.HCloudMachine, hcloudClient hcloudclient.Client) *Service {
return &Service{
diff --git a/pkg/services/hcloud/server/server_test.go b/pkg/services/hcloud/server/server_test.go
index 54761c4e8..f2519b495 100644
--- a/pkg/services/hcloud/server/server_test.go
+++ b/pkg/services/hcloud/server/server_test.go
@@ -19,45 +19,59 @@ package server
import (
"context"
"fmt"
+ "testing"
"time"
"github.com/hetznercloud/hcloud-go/v2/hcloud"
+ "github.com/mitchellh/copystructure"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck // we will handle that, when we update to capi v1.11
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
"github.com/syself/cluster-api-provider-hetzner/pkg/scope"
- fakeclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client/fake"
+ sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
+ fakehcloudclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client/fake"
+ "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client/mocks"
+ "github.com/syself/cluster-api-provider-hetzner/test/helpers"
)
-var _ = Describe("statusFromHCloudServer", func() {
- var sts infrav1.HCloudMachineStatus
- BeforeEach(func() {
- sts = statusFromHCloudServer(server)
- })
- It("should have the right instance state", func() {
- Expect(*sts.InstanceState).To(Equal(instanceState))
- })
- It("should have three addresses", func() {
- Expect(len(sts.Addresses)).To(Equal(3))
- })
- It("should have the right address IPs", func() {
- for i, addr := range sts.Addresses {
- Expect(addr.Address).To(Equal(ips[i]))
- }
- })
- It("should have the right address types", func() {
- for i, addr := range sts.Addresses {
- Expect(addr.Type).To(Equal(addressTypes[i]))
- }
- })
-})
+func Test_statusAddresses(t *testing.T) {
+ server := newTestServer()
+
+ // Create deep copy.
+ saved, err := copystructure.Copy(server)
+ require.NoError(t, err)
+
+ addresses := statusAddresses(server)
+
+ // should have three addresses
+ require.Equal(t, 3, len(addresses))
+
+ // should have the right address IPs
+ ips := []string{"1.2.3.4", "2001:db8::1", "10.0.0.2"}
+ for i, addr := range addresses {
+ require.Equal(t, ips[i], addr.Address)
+ }
+
+ // Check that input was not altered.
+ require.Equal(t, saved, server)
+
+ // should have the right address types
+ addressTypes := []clusterv1.MachineAddressType{clusterv1.MachineExternalIP, clusterv1.MachineExternalIP, clusterv1.MachineInternalIP}
+ for i, addr := range addresses {
+ require.Equal(t, addressTypes[i], addr.Type)
+ }
+}
type testCaseStatusFromHCloudServer struct {
isControlPlane bool
@@ -109,113 +123,9 @@ var _ = DescribeTable("createLabels",
}),
)
-var _ = Describe("filterHCloudSSHKeys", func() {
- type testCaseFilterHCloudSSHKeys struct {
- sshKeysSpec []infrav1.SSHKey
- expectedOutput []*hcloud.SSHKey
- }
-
- var sshKeysAPI []*hcloud.SSHKey
- BeforeEach(func() {
- sshKeysAPI = []*hcloud.SSHKey{
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
- Name: "sshkey1",
- ID: 42,
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:3g",
- Name: "sshkey2",
- ID: 43,
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4h",
- Name: "sshkey3",
- ID: 44,
- },
- }
- })
- _ = DescribeTable("no_error",
- func(tc testCaseFilterHCloudSSHKeys) {
- Expect(filterHCloudSSHKeys(sshKeysAPI, tc.sshKeysSpec)).Should(Equal(tc.expectedOutput))
- },
- Entry("no_error_same_length", testCaseFilterHCloudSSHKeys{
- sshKeysSpec: []infrav1.SSHKey{
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
- Name: "sshkey1",
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:3g",
- Name: "sshkey2",
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
- Name: "sshkey3",
- },
- },
- expectedOutput: []*hcloud.SSHKey{
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
- Name: "sshkey1",
- ID: 42,
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:3g",
- Name: "sshkey2",
- ID: 43,
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4h",
- Name: "sshkey3",
- ID: 44,
- },
- },
- }),
- Entry("no_error_different_length", testCaseFilterHCloudSSHKeys{
- sshKeysSpec: []infrav1.SSHKey{
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
- Name: "sshkey1",
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
- Name: "sshkey3",
- },
- },
- expectedOutput: []*hcloud.SSHKey{
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
- Name: "sshkey1",
- ID: 42,
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4h",
- Name: "sshkey3",
- ID: 44,
- },
- },
- }),
- )
-
- It("should error", func() {
- _, err := filterHCloudSSHKeys(sshKeysAPI, []infrav1.SSHKey{
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
- Name: "sshkey1",
- },
- {
- Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:5i",
- Name: "sshkey4",
- },
- })
- Expect(err).To(HaveOccurred())
- })
-})
-
var _ = Describe("handleServerStatusOff", func() {
var hcloudMachine *infrav1.HCloudMachine
- client := fakeclient.NewHCloudClientFactory().NewClient("")
+ client := fakehcloudclient.NewHCloudClientFactory().NewClient("")
server, err := client.CreateServer(context.Background(), hcloud.ServerCreateOpts{Name: "serverName"})
Expect(err).To(Succeed())
@@ -302,46 +212,6 @@ var _ = Describe("handleServerStatusOff", func() {
})
})
-var _ = Describe("Test ValidateLabels", func() {
- type testCaseValidateLabels struct {
- gotLabels map[string]string
- wantLabels map[string]string
- expectError error
- }
-
- DescribeTable("Test ValidateLabels",
- func(tc testCaseValidateLabels) {
- err := validateLabels(&hcloud.Server{Labels: tc.gotLabels}, tc.wantLabels)
-
- if tc.expectError != nil {
- Expect(err).To(MatchError(tc.expectError))
- } else {
- Expect(err).To(BeNil())
- }
- },
- Entry("exact equality", testCaseValidateLabels{
- gotLabels: map[string]string{"key1": "val1", "key2": "val2"},
- wantLabels: map[string]string{"key1": "val1", "key2": "val2"},
- expectError: nil,
- }),
- Entry("subset of labels", testCaseValidateLabels{
- gotLabels: map[string]string{"key1": "val1", "otherkey": "otherval", "key2": "val2"},
- wantLabels: map[string]string{"key1": "val1", "key2": "val2"},
- expectError: nil,
- }),
- Entry("wrong value", testCaseValidateLabels{
- gotLabels: map[string]string{"key1": "val1", "otherkey": "otherval", "key2": "otherval"},
- wantLabels: map[string]string{"key1": "val1", "key2": "val2"},
- expectError: errWrongLabel,
- }),
- Entry("missing key", testCaseValidateLabels{
- gotLabels: map[string]string{"key1": "val1", "otherkey": "otherval"},
- wantLabels: map[string]string{"key1": "val1", "key2": "val2"},
- expectError: errMissingLabel,
- }),
- )
-})
-
var _ = Describe("Test handleRateLimit", func() {
type testCaseHandleRateLimit struct {
hm *infrav1.HCloudMachine
@@ -435,6 +305,719 @@ var _ = Describe("Test handleRateLimit", func() {
)
})
+var _ = Describe("getSSHKeys", func() {
+ var (
+ service *Service
+ hcloudClient *mocks.Client
+ )
+
+ BeforeEach(func() {
+ hcloudClient = mocks.NewClient(GinkgoT())
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: testEnv.Manager.GetClient(),
+ APIReader: testEnv.Manager.GetAPIReader(),
+ HCloudClient: hcloudClient,
+ Logger: GinkgoLogr,
+
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "clustername",
+ Namespace: "default",
+ },
+ },
+
+ HetznerCluster: &infrav1.HetznerCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "clustername",
+ Namespace: "default",
+ },
+ Spec: infrav1.HetznerClusterSpec{
+ HetznerSecret: infrav1.HetznerSecretRef{
+ Name: "secretname",
+ Key: infrav1.HetznerSecretKeyRef{
+ SSHKey: "hcloud-ssh-key-name",
+ },
+ },
+ },
+ },
+
+ HetznerSecret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "secretname",
+ Namespace: "default",
+ },
+ Data: map[string][]byte{
+ "hcloud-ssh-key-name": []byte("sshKey1"),
+ },
+ },
+ })
+ Expect(err).To(BeNil())
+
+ service = &Service{
+ scope: &scope.MachineScope{
+ ClusterScope: *clusterScope,
+ HCloudMachine: &infrav1.HCloudMachine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "hcloudmachinename",
+ Namespace: "default",
+ },
+ },
+ },
+ }
+ })
+
+ AfterEach(func() {
+ Expect(hcloudClient.AssertExpectations(GinkgoT())).To(BeTrue())
+ })
+
+ It("uses HCloudMachine.Spec.SSHKeys if present", func() {
+ By("populating the HCloudMachine.Spec.SSHKeys")
+ service.scope.HCloudMachine.Spec.SSHKeys = []infrav1.SSHKey{
+ {
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ {
+ Name: "sshKey3",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
+ },
+ }
+
+ By("ensuring that the mocked hcloud client returns all the ssh keys")
+ sshKeysByHCloudClient := []*hcloud.SSHKey{
+ {
+ ID: 1,
+ Name: "sshKey1",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f",
+ },
+ {
+ ID: 2,
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ {
+ ID: 3,
+ Name: "sshKey3",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
+ },
+ }
+
+ hcloudClient.On("ListSSHKeys", mock.Anything, mock.Anything).Return(sshKeysByHCloudClient, nil)
+
+ By("ensuring that the getSSHKeys method returns all the referenced ssh keys")
+ caphSSHKeys, hcloudSSHKeys, err := service.getSSHKeys(context.Background())
+ Expect(err).To(BeNil())
+
+ Expect(caphSSHKeys).To(ConsistOf([]infrav1.SSHKey{
+ {
+ Name: "sshKey1",
+ },
+ {
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ {
+ Name: "sshKey3",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
+ },
+ }))
+
+ Expect(hcloudSSHKeys).To(ConsistOf(sshKeysByHCloudClient))
+ })
+
+ It("falls back to HetznerCluster.Spec.SSHKeys.HCloud, if HCloudMachine.Spec.SSHKeys is empty", func() {
+ By("populating the HCloudMachine.Spec.SSHKeys")
+ service.scope.HetznerCluster.Spec.SSHKeys.HCloud = []infrav1.SSHKey{
+ {
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ {
+ Name: "sshKey3",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
+ },
+ }
+
+ By("ensuring that the mocked hcloud client returns all the ssh keys")
+ sshKeysByHCloudClient := []*hcloud.SSHKey{
+ {
+ ID: 1,
+ Name: "sshKey1",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f",
+ },
+ {
+ ID: 2,
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ {
+ ID: 3,
+ Name: "sshKey3",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
+ },
+ }
+
+ hcloudClient.On("ListSSHKeys", mock.Anything, mock.Anything).Return(sshKeysByHCloudClient, nil)
+
+ By("ensuring that the getSSHKeys method returns all the referenced ssh keys")
+ caphSSHKeys, hcloudSSHKeys, err := service.getSSHKeys(context.Background())
+ Expect(err).To(BeNil())
+
+ Expect(caphSSHKeys).To(ConsistOf([]infrav1.SSHKey{
+ {
+ Name: "sshKey1",
+ },
+ {
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ {
+ Name: "sshKey3",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
+ },
+ }))
+
+ Expect(hcloudSSHKeys).To(ConsistOf(sshKeysByHCloudClient))
+ })
+
+ It("one of the ssh key defined in HCloudMachine.Spec.SSHKeys is not present in hcloud", func() {
+ By("populating the HCloudMachine.Spec.SSHKeys")
+ service.scope.HCloudMachine.Spec.SSHKeys = []infrav1.SSHKey{
+ {
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ {
+ Name: "sshKey3",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:4f",
+ },
+ }
+
+ By("ensuring that the mocked hcloud client doesn't return one of the ssh key")
+ sshKeysByHCloudClient := []*hcloud.SSHKey{
+ {
+ ID: 1,
+ Name: "sshKey1",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f",
+ },
+ {
+ ID: 2,
+ Name: "sshKey2",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:2f",
+ },
+ }
+
+ hcloudClient.On("ListSSHKeys", mock.Anything, mock.Anything).Return(sshKeysByHCloudClient, nil)
+
+ By("ensuring that the getSSHKeys method fails")
+ _, _, err := service.getSSHKeys(context.Background())
+ Expect(err).ToNot(BeNil())
+ })
+
+ It("adds secret SSH key if not already present", func() {
+ // no machine keys, secretKey should be added
+
+ sshKeysByHCloudClient := []*hcloud.SSHKey{
+ {
+ ID: 1,
+ Name: "sshKey1",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f",
+ },
+ }
+
+ hcloudClient.On("ListSSHKeys", mock.Anything, mock.Anything).Return(sshKeysByHCloudClient, nil)
+
+ caphKeys, hcloudSSHKeys, err := service.getSSHKeys(context.Background())
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(caphKeys).To(ConsistOf([]infrav1.SSHKey{
+ {
+ Name: "sshKey1",
+ },
+ }))
+
+ Expect(hcloudSSHKeys).To(ConsistOf(sshKeysByHCloudClient))
+ })
+
+ It("does not duplicate secret SSH key if already in list", func() {
+ sshKeyName := "sshKey1"
+ sshKeyFingerprint := "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f"
+
+ service.scope.HCloudMachine.Spec.SSHKeys = []infrav1.SSHKey{
+ {
+ Name: sshKeyName,
+ Fingerprint: sshKeyFingerprint,
+ },
+ }
+
+ sshKeysByHCloudClient := []*hcloud.SSHKey{
+ {
+ ID: 1,
+ Name: sshKeyName,
+ Fingerprint: sshKeyFingerprint,
+ },
+ }
+
+ hcloudClient.On("ListSSHKeys", mock.Anything, mock.Anything).Return(sshKeysByHCloudClient, nil)
+
+ caphKeys, hcloudSSHKeys, err := service.getSSHKeys(context.Background())
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(caphKeys).To(ConsistOf([]infrav1.SSHKey{
+ {
+ Name: sshKeyName,
+ Fingerprint: sshKeyFingerprint,
+ },
+ }))
+
+ Expect(hcloudSSHKeys).To(ConsistOf(sshKeysByHCloudClient))
+ })
+})
+
+var _ = Describe("Reconcile", func() {
+ var (
+ service *Service
+ testNs *corev1.Namespace
+ hcloudClient *mocks.Client
+ )
+
+ testScheme := runtime.NewScheme()
+ err := infrav1.AddToScheme(testScheme)
+ Expect(err).To(BeNil())
+
+ BeforeEach(func() {
+ hcloudClient = mocks.NewClient(GinkgoT())
+ testNs, err = testEnv.CreateNamespace(ctx, "server-reconcile")
+ Expect(err).To(BeNil())
+
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: testEnv.Manager.GetClient(),
+ APIReader: testEnv.Manager.GetAPIReader(),
+ HCloudClient: hcloudClient,
+ Logger: GinkgoLogr,
+
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "clustername",
+ Namespace: testNs.Name,
+ },
+ },
+
+ HetznerCluster: &infrav1.HetznerCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "clustername",
+ Namespace: testNs.Name,
+ },
+ Spec: infrav1.HetznerClusterSpec{
+ HetznerSecret: infrav1.HetznerSecretRef{
+ Name: "secretname",
+ Key: infrav1.HetznerSecretKeyRef{
+ SSHKey: "hcloud-ssh-key-name",
+ },
+ },
+ SSHKeys: infrav1.HetznerSSHKeys{
+ HCloud: []infrav1.SSHKey{},
+ RobotRescueSecretRef: infrav1.SSHSecretRef{
+ Name: "rescue-ssh-secret",
+ Key: infrav1.SSHSecretKeyRef{
+ Name: "sshkey-name",
+ PublicKey: "public-key",
+ PrivateKey: "private-key",
+ },
+ },
+ },
+ },
+ },
+
+ HetznerSecret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "secretname",
+ Namespace: testNs.Name,
+ },
+ Data: map[string][]byte{
+ "hcloud-ssh-key-name": []byte("sshKey1"),
+ },
+ },
+ })
+
+ Expect(err).To(BeNil())
+
+ err = testEnv.Create(ctx, helpers.GetDefaultSSHSecret("rescue-ssh-secret", testNs.Name))
+ Expect(err).To(BeNil())
+
+ service = &Service{
+ scope: &scope.MachineScope{
+ ClusterScope: *clusterScope,
+
+ Machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "machineName",
+ Namespace: testNs.Name,
+ },
+ Spec: clusterv1.MachineSpec{
+ FailureDomain: ptr.To("nbg1"),
+ },
+ },
+
+ HCloudMachine: &infrav1.HCloudMachine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "hcloudmachinename",
+ Namespace: testNs.Name,
+ },
+ Spec: infrav1.HCloudMachineSpec{
+ Type: "cpx11",
+ ImageName: "ubuntu-24.04",
+ SSHKeys: []infrav1.SSHKey{
+ {
+ Name: "sshKey1",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f",
+ },
+ },
+ },
+ },
+ SSHClientFactory: testEnv.BaremetalSSHClientFactory,
+ },
+ }
+ })
+
+ AfterEach(func() {
+ Expect(hcloudClient.AssertExpectations(GinkgoT())).To(BeTrue())
+ Expect(testEnv.Cleanup(ctx, testNs)).To(Succeed())
+ })
+
+ It("sets the region in status of hcloudMachine, by fetching the failure domain from machine.spec", func() {
+ By("calling reconcile")
+ _, err := service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+
+ By("ensuring the region is set in the status of hcloudMachine")
+ Expect(service.scope.HCloudMachine.Status.Region).To(Equal(infrav1.Region("nbg1")))
+
+ By("ensuring the BootstrapReady condition is marked as false")
+ Expect(isPresentAndFalseWithReason(service.scope.HCloudMachine, infrav1.BootstrapReadyCondition, infrav1.BootstrapNotReadyReason)).To(BeTrue())
+ })
+
+ It("sets the region in status of hcloudMachine, by fetching the failure domain from cluster.status if machine.spec.failureDomain is empty", func() {
+ By("setting the failure domain in cluster.status")
+ service.scope.Machine.Spec = clusterv1.MachineSpec{}
+ service.scope.Cluster.Status.FailureDomains = clusterv1.FailureDomains{
+ "nbg1": clusterv1.FailureDomainSpec{},
+ }
+
+ By("calling reconcile")
+ _, err := service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+
+ By("ensuring the region is set in the status of hcloudMachine")
+ Expect(service.scope.HCloudMachine.Status.Region).To(Equal(infrav1.Region("nbg1")))
+ })
+
+ It("sets the CreateMachineError if the ProviderID is set on the HCloudMachine but the actual server was not found in the cloud", func() {
+ By("setting the bootstrap data")
+ err = testEnv.Create(ctx, &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bootstrapsecret",
+ Namespace: testNs.Name,
+ },
+ Data: map[string][]byte{
+ "value": []byte("dummy-bootstrap-data"),
+ },
+ })
+ Expect(err).To(BeNil())
+
+ By("setting the ProviderID on the HCloudMachine")
+ service.scope.HCloudMachine.Spec.ProviderID = ptr.To("hcloud://1234567")
+ err = testEnv.Create(ctx, service.scope.HCloudMachine)
+ Expect(err).To(BeNil())
+
+ service.scope.Machine.Spec.Bootstrap.DataSecretName = ptr.To("bootstrapsecret")
+
+ By("ensuring that the hcloud client returns both server and error as nil")
+ hcloudClient.On("GetServer", mock.Anything, int64(1234567)).Return(nil, nil)
+ hcloudClient.On("ListServers", mock.Anything, mock.Anything).Return(nil, nil)
+
+ By("calling reconcile")
+ _, err := service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+
+ By("validating if CreateMachineError was set on HCloudMachine object")
+ Expect(*service.scope.HCloudMachine.Status.FailureReason).To(Equal(capierrors.CreateMachineError))
+ Expect(*service.scope.HCloudMachine.Status.FailureMessage).To(Equal("hcloud server (\"hcloud://1234567\") no longer available. Setting MachineError."))
+ })
+
+ It("transitions the BootStrate from BootStateUnset -> BootStateBootingToRealOS -> BootStateOperatingSystemRunning (imageName)", func() {
+ By("setting the bootstrap data")
+ err = testEnv.Create(ctx, &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bootstrapsecret",
+ Namespace: testNs.Name,
+ },
+ Data: map[string][]byte{
+ "value": []byte("dummy-bootstrap-data"),
+ },
+ })
+ Expect(err).To(BeNil())
+
+ err = testEnv.Create(ctx, service.scope.HCloudMachine)
+ Expect(err).To(BeNil())
+
+ service.scope.Machine.Spec.Bootstrap.DataSecretName = ptr.To("bootstrapsecret")
+
+ hcloudClient.On("GetServerType", mock.Anything, mock.Anything).Return(&hcloud.ServerType{
+ Architecture: hcloud.ArchitectureX86,
+ }, nil)
+
+ hcloudClient.On("ListImages", mock.Anything, hcloud.ImageListOpts{
+ ListOpts: hcloud.ListOpts{
+ LabelSelector: "caph-image-name==ubuntu-24.04",
+ },
+ Architecture: []hcloud.Architecture{hcloud.ArchitectureX86},
+ }).Return([]*hcloud.Image{
+ {
+ ID: 123456,
+ Name: "ubuntu",
+ },
+ }, nil)
+
+ hcloudClient.On("ListImages", mock.Anything, hcloud.ImageListOpts{
+ Name: "ubuntu-24.04",
+ Architecture: []hcloud.Architecture{hcloud.ArchitectureX86},
+ }).Return([]*hcloud.Image{}, nil)
+
+ hcloudClient.On("ListSSHKeys", mock.Anything, mock.Anything).Return([]*hcloud.SSHKey{
+ {
+ ID: 1,
+ Name: "sshKey1",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f",
+ },
+ }, nil)
+
+ hcloudClient.On("CreateServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinename",
+ Status: hcloud.ServerStatusInitializing,
+ }, nil)
+
+ By("calling reconcile")
+ _, err := service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to BootStateBootingToRealOS")
+
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateBootingToRealOS))
+
+ By("reconciling again")
+ hcloudClient.On("GetServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinename",
+ Status: hcloud.ServerStatusRunning,
+ }, nil)
+
+ _, err = service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to BootStateOperatingSystemRunning once the server's status changes to running")
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateOperatingSystemRunning))
+ })
+
+ It("transitions to BootStateOperatingSystemRunning (imageURL)", func() {
+ By("setting the bootstrap data")
+ err = testEnv.Create(ctx, &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bootstrapsecret",
+ Namespace: testNs.Name,
+ },
+ Data: map[string][]byte{
+ "value": []byte("dummy-bootstrap-data"),
+ },
+ })
+ Expect(err).To(BeNil())
+ service.scope.ImageURLCommand = "dummy-image-url-command.sh"
+ service.scope.HCloudMachine.Spec.ImageName = ""
+ service.scope.HCloudMachine.Spec.ImageURL = "oci://example.com/repo/image:v1"
+ err = testEnv.Create(ctx, service.scope.HCloudMachine)
+ Expect(err).To(BeNil())
+
+ service.scope.Machine.Spec.Bootstrap.DataSecretName = ptr.To("bootstrapsecret")
+
+ hcloudClient.On("GetServerType", mock.Anything, mock.Anything).Return(&hcloud.ServerType{
+ Architecture: hcloud.ArchitectureX86,
+ }, nil)
+
+ hcloudClient.On("ListImages", mock.Anything, hcloud.ImageListOpts{
+ ListOpts: hcloud.ListOpts{
+ LabelSelector: "caph-image-name==ubuntu-24.04",
+ },
+ Architecture: []hcloud.Architecture{hcloud.ArchitectureX86},
+ }).Return([]*hcloud.Image{
+ {
+ ID: 123456,
+ Name: "ubuntu",
+ },
+ }, nil)
+
+ hcloudClient.On("ListImages", mock.Anything, hcloud.ImageListOpts{
+ Name: "ubuntu-24.04",
+ Architecture: []hcloud.Architecture{hcloud.ArchitectureX86},
+ }).Return([]*hcloud.Image{}, nil)
+
+ hcloudClient.On("ListSSHKeys", mock.Anything, mock.Anything).Return([]*hcloud.SSHKey{
+ {
+ ID: 1,
+ Name: "sshKey1",
+ Fingerprint: "b7:2f:30:a0:2f:6c:58:6c:21:04:58:61:ba:06:3b:1f",
+ },
+ }, nil)
+
+ hcloudClient.On("CreateServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinename",
+ Status: hcloud.ServerStatusInitializing,
+ }, nil)
+
+ By("calling reconcile")
+ _, err := service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to Initializing")
+
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateInitializing))
+
+ By("reconciling again")
+ hcloudClient.On("GetServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinename",
+ Status: hcloud.ServerStatusRunning,
+ }, nil).Once()
+
+ startTime := time.Now()
+ hcloudClient.On("EnableRescueSystem", mock.Anything, mock.Anything, mock.Anything).Return(
+ hcloud.ServerEnableRescueResult{
+ Action: &hcloud.Action{
+ ID: 334455,
+ Status: hcloud.ActionStatusRunning,
+ Command: "",
+ Progress: 0,
+ Started: startTime,
+ Finished: time.Time{},
+ ErrorCode: "",
+ ErrorMessage: "",
+ Resources: []*hcloud.ActionResource{},
+ },
+ RootPassword: "",
+ }, nil).Once()
+ _, err = service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to EnablingRescue")
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateEnablingRescue))
+
+ By("reconcile again --------------------------------------------------------")
+ hcloudClient.On("GetServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinename",
+ RescueEnabled: true,
+ Status: hcloud.ServerStatusRunning,
+ }, nil).Once()
+ hcloudClient.On("GetAction", mock.Anything, mock.Anything).Return(
+ &hcloud.Action{
+ ID: 1,
+ Status: hcloud.ActionStatusSuccess,
+ Command: "",
+ Progress: 0,
+ Started: startTime,
+ Finished: time.Now(),
+ ErrorCode: "",
+ ErrorMessage: "",
+ Resources: []*hcloud.ActionResource{},
+ }, nil,
+ )
+ _, err = service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to EnablingRescue")
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateEnablingRescue))
+
+ By("reconcile again --------------------------------------------------------")
+ hcloudClient.On("GetServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinenameWithRescueEnabled",
+ RescueEnabled: true,
+ Status: hcloud.ServerStatusRunning,
+ }, nil).Once()
+
+ testEnv.RescueSSHClient.On("Reboot").Return(sshclient.Output{
+ Err: nil,
+ StdOut: "ok",
+ StdErr: "",
+ })
+ _, err = service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to BootingToRescue")
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateBootingToRescue))
+
+ By("reconcile again --------------------------------------------------------")
+ hcloudClient.On("GetServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinenameWithRescueEnabled",
+ Status: hcloud.ServerStatusRunning,
+ }, nil).Once()
+ testEnv.RescueSSHClient.On("GetHostName").Return(sshclient.Output{
+ StdOut: "rescue",
+ StdErr: "",
+ Err: nil,
+ })
+ startImageURLCommandMock := testEnv.RescueSSHClient.On("StartImageURLCommand", mock.Anything, mock.Anything, mock.Anything, mock.Anything, "hcloudmachinename", []string{"sda"}).Return(0, "", nil)
+ _, err = service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to RunningImageCommand")
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateRunningImageCommand))
+ startImageURLCommandMock.Parent.AssertNumberOfCalls(GinkgoT(), "StartImageURLCommand", 1)
+
+ By("reconcile again --------------------------------------------------------")
+ testEnv.RescueSSHClient.On("GetHostName").Return(sshclient.Output{
+ StdOut: "rescue",
+ StdErr: "",
+ Err: nil,
+ })
+ testEnv.RescueSSHClient.On("StateOfImageURLCommand").Return(sshclient.ImageURLCommandStateFinishedSuccessfully, "output-of-image-url-command", nil)
+ hcloudClient.On("GetServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinenameWithRescueEnabled",
+ RescueEnabled: true,
+ Status: hcloud.ServerStatusRunning,
+ }, nil).Once()
+ _, err = service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+
+ By("ensuring the bootstate has transitioned to BootingToRealOS")
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateBootingToRealOS))
+
+ By("reconcile again --------------------------------------------------------")
+ hcloudClient.On("GetServer", mock.Anything, mock.Anything).Return(&hcloud.Server{
+ ID: 1,
+ Name: "hcloudmachinenameWithRescueEnabled",
+ RescueEnabled: true,
+ Status: hcloud.ServerStatusRunning,
+ }, nil).Once()
+ _, err = service.Reconcile(ctx)
+ Expect(err).To(BeNil())
+ Expect(service.scope.HCloudMachine.Status.FailureReason).To(BeNil())
+ By("ensuring the bootstate has transitioned to OperatingSystemRunning")
+ Expect(service.scope.HCloudMachine.Status.BootState).To(Equal(infrav1.HCloudBootStateOperatingSystemRunning))
+ })
+})
+
func isPresentAndFalseWithReason(getter conditions.Getter, condition clusterv1.ConditionType, reason string) bool {
if !conditions.Has(getter, condition) {
return false
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index 20541a024..ec51cd39f 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -221,3 +221,21 @@ func GetDefaultLogger(logLevel string) logr.Logger {
return zapr.NewLogger(zapLog)
}
+
+// IsLocalCacheUpToDate compares two ResourceVersions, and return true if the local cache is
+// up-to-date or ahead. Related: https://github.com/kubernetes-sigs/controller-runtime/issues/3320
+func IsLocalCacheUpToDate(rvLocalCache, rvAPIServer string) bool {
+ if len(rvLocalCache) < len(rvAPIServer) {
+ // RV of cache is behind.
+ return false
+ }
+ if len(rvLocalCache) > len(rvAPIServer) {
+ // RV of cache has changed like from "999" to "1000"
+ return true
+ }
+ if rvLocalCache >= rvAPIServer {
+ // RV of cache is equal, or ahead.
+ return true
+ }
+ return false
+}
diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go
index 742debe77..d496edcc1 100644
--- a/pkg/utils/utils_test.go
+++ b/pkg/utils/utils_test.go
@@ -17,8 +17,11 @@ limitations under the License.
package utils_test
import (
+ "testing"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ "github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
@@ -321,3 +324,20 @@ var _ = Describe("Test FindOwnerRefFromList", func() {
}),
)
})
+
+func Test_IsLocalCacheUpToDate(t *testing.T) {
+ // true: localCache is up-to-date
+ require.True(t, utils.IsLocalCacheUpToDate("123", "123"))
+
+ // true: localCache is ahead apiserver
+ require.True(t, utils.IsLocalCacheUpToDate("124", "123"))
+
+ // true: localCache is ahead apiserver
+ require.True(t, utils.IsLocalCacheUpToDate("1000", "999"))
+
+ // false: localCache is behind apiserver.
+ require.False(t, utils.IsLocalCacheUpToDate("123", "124"))
+
+ // false: localCache is behind apiserver.
+ require.False(t, utils.IsLocalCacheUpToDate("999", "1000"))
+}
diff --git a/templates/cilium/cilium.yaml b/templates/cilium/cilium.yaml
index 8e6215096..ee1597f0c 100644
--- a/templates/cilium/cilium.yaml
+++ b/templates/cilium/cilium.yaml
@@ -2,20 +2,8 @@ rollOutCiliumPods: true
priorityClassName: "system-node-critical"
hubble:
- metrics:
- enabled:
- - dns:query;ignoreAAAA
- - drop
- - tcp
- - flow
- - icmp
- - http
- relay:
- enabled: true
- rollOutPods: true
- ui:
- enabled: true
- rollOutPods: true
+ enabled: false
+
ipam:
mode: "kubernetes"
@@ -24,3 +12,20 @@ kubeProxyReplacement: true
operator:
rollOutPods: true
priorityClassName: "system-node-critical"
+ replicas: 1
+ tolerations: # TODO: These Tolerations can be removed, when our PR https://github.com/cilium/cilium/pull/41098 got released.
+ - effect: NoSchedule
+ key: node.cluster.x-k8s.io/uninitialized
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - effect: NoSchedule
+ key: node.kubernetes.io/not-ready
diff --git a/templates/cluster-templates/bases/hcloud-kcp-ubuntu.yaml b/templates/cluster-templates/bases/hcloud-kcp-ubuntu.yaml
index c77fcf736..9b19b493a 100644
--- a/templates/cluster-templates/bases/hcloud-kcp-ubuntu.yaml
+++ b/templates/cluster-templates/bases/hcloud-kcp-ubuntu.yaml
@@ -203,7 +203,9 @@ spec:
- apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- - modprobe overlay && modprobe br_netfilter && sysctl --system
+ - modprobe overlay
+ - modprobe br_netfilter
+ - sysctl --system
- wget https://github.com/opencontainers/runc/releases/download/v$RUNC/runc.$ARCH
- wget https://github.com/opencontainers/runc/releases/download/v$RUNC/runc.sha256sum
- sha256sum --check --ignore-missing runc.sha256sum
@@ -217,14 +219,19 @@ spec:
- mkdir -p /etc/containerd
- containerd config default > /etc/containerd/config.toml
- sed -i "s/SystemdCgroup = false/SystemdCgroup = true/" /etc/containerd/config.toml
- - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd
+ - systemctl daemon-reload
+ - systemctl enable containerd
+ - systemctl start containerd
- mkdir -p /etc/apt/keyrings/
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- apt-get update
- - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet
+ - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion
+ - apt-mark hold kubelet kubectl kubeadm
+ - systemctl enable kubelet
- kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION
- echo 'source <(kubectl completion bash)' >>/root/.bashrc
- echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>/root/.bashrc
- - apt-get -y autoremove && apt-get -y clean all
+ - apt-get -y autoremove
+ - apt-get -y clean all
version: "${KUBERNETES_VERSION}"
diff --git a/templates/cluster-templates/bases/hetznerbaremetal-kcp-ubuntu.yaml b/templates/cluster-templates/bases/hetznerbaremetal-kcp-ubuntu.yaml
index 66fc39860..c47316f19 100644
--- a/templates/cluster-templates/bases/hetznerbaremetal-kcp-ubuntu.yaml
+++ b/templates/cluster-templates/bases/hetznerbaremetal-kcp-ubuntu.yaml
@@ -222,7 +222,9 @@ spec:
- apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- - modprobe overlay && modprobe br_netfilter && sysctl --system
+ - modprobe overlay
+ - modprobe br_netfilter
+ - sysctl --system
- wget https://github.com/opencontainers/runc/releases/download/v$RUNC/runc.$ARCH
- wget https://github.com/opencontainers/runc/releases/download/v$RUNC/runc.sha256sum
- sha256sum --check --ignore-missing runc.sha256sum
@@ -236,14 +238,19 @@ spec:
- mkdir -p /etc/containerd
- containerd config default > /etc/containerd/config.toml
- sed -i "s/SystemdCgroup = false/SystemdCgroup = true/" /etc/containerd/config.toml
- - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd
+ - systemctl daemon-reload
+ - systemctl enable containerd
+ - systemctl start containerd
- mkdir -p /etc/apt/keyrings/
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- apt-get update
- - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet
+ - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion
+ - apt-mark hold kubelet kubectl kubeadm
+ - systemctl enable kubelet
- kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION
- echo 'source <(kubectl completion bash)' >>/root/.bashrc
- echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>/root/.bashrc
- - apt-get -y autoremove && apt-get -y clean all
+ - apt-get -y autoremove
+ - apt-get -y clean all
version: "${KUBERNETES_VERSION}"
diff --git a/templates/cluster-templates/bases/hetznerbaremetal-mt-control-plane-ubuntu.yaml b/templates/cluster-templates/bases/hetznerbaremetal-mt-control-plane-ubuntu.yaml
index 0798e1442..4650a3781 100644
--- a/templates/cluster-templates/bases/hetznerbaremetal-mt-control-plane-ubuntu.yaml
+++ b/templates/cluster-templates/bases/hetznerbaremetal-mt-control-plane-ubuntu.yaml
@@ -20,13 +20,20 @@ spec:
size: all
postInstallScript: |
#!/bin/bash
- mkdir -p /etc/cloud/cloud.cfg.d && touch /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
+ # Bash Strict Mode: https://github.com/guettli/bash-strict-mode
+ trap 'echo -e "\n🤷 🚨 🔥 Warning: A command has failed. Exiting the script. Line was ($0:$LINENO): $(sed -n "$LINENO"p "$0" 2>/dev/null || true) 🔥 🚨 🤷 "; exit 3' ERR
+ set -Eeuo pipefail
+
+ mkdir -p /etc/cloud/cloud.cfg.d
+ touch /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
echo "network: { config: disabled }" > /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
- apt-get update && apt-get install -y cloud-init apparmor apparmor-utils
+
+ apt-get update
+
+ apt-get install -y cloud-init apparmor apparmor-utils
cloud-init clean --logs
sshSpec:
portAfterInstallImage: 22
- portAfterCloudInit: 22
secretRef:
name: robot-ssh
key:
diff --git a/templates/cluster-templates/bases/hetznerbaremetal-mt-md-1-ubuntu.yaml b/templates/cluster-templates/bases/hetznerbaremetal-mt-md-1-ubuntu.yaml
index 8d5fb6f54..624e2e5ba 100644
--- a/templates/cluster-templates/bases/hetznerbaremetal-mt-md-1-ubuntu.yaml
+++ b/templates/cluster-templates/bases/hetznerbaremetal-mt-md-1-ubuntu.yaml
@@ -20,13 +20,20 @@ spec:
size: all
postInstallScript: |
#!/bin/bash
- mkdir -p /etc/cloud/cloud.cfg.d && touch /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
+ # Bash Strict Mode: https://github.com/guettli/bash-strict-mode
+ trap 'echo -e "\n🤷 🚨 🔥 Warning: A command has failed. Exiting the script. Line was ($0:$LINENO): $(sed -n "$LINENO"p "$0" 2>/dev/null || true) 🔥 🚨 🤷 "; exit 3' ERR
+ set -Eeuo pipefail
+
+ mkdir -p /etc/cloud/cloud.cfg.d
+ touch /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
echo "network: { config: disabled }" > /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
- apt-get update && apt-get install -y cloud-init apparmor apparmor-utils
+
+ apt-get update
+
+ apt-get install -y cloud-init apparmor apparmor-utils
cloud-init clean --logs
sshSpec:
portAfterInstallImage: 22
- portAfterCloudInit: 22
secretRef:
name: robot-ssh
key:
diff --git a/templates/cluster-templates/bases/kct-md-0-ubuntu.yaml b/templates/cluster-templates/bases/kct-md-0-ubuntu.yaml
index 8aa8fa37d..84ae6afac 100644
--- a/templates/cluster-templates/bases/kct-md-0-ubuntu.yaml
+++ b/templates/cluster-templates/bases/kct-md-0-ubuntu.yaml
@@ -113,7 +113,9 @@ spec:
- apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- - modprobe overlay && modprobe br_netfilter && sysctl --system
+ - modprobe overlay
+ - modprobe br_netfilter
+ - sysctl --system
- wget https://github.com/opencontainers/runc/releases/download/v$RUNC/runc.$ARCH
- wget https://github.com/opencontainers/runc/releases/download/v$RUNC/runc.sha256sum
- sha256sum --check --ignore-missing runc.sha256sum
@@ -127,13 +129,18 @@ spec:
- mkdir -p /etc/containerd
- containerd config default > /etc/containerd/config.toml
- sed -i "s/SystemdCgroup = false/SystemdCgroup = true/" /etc/containerd/config.toml
- - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd
+ - systemctl daemon-reload
+ - systemctl enable containerd
+ - systemctl start containerd
- mkdir -p /etc/apt/keyrings/
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- apt-get update
- - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet
+ - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion
+ - apt-mark hold kubelet kubectl kubeadm
+ - systemctl enable kubelet
- kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION
- echo 'source <(kubectl completion bash)' >>/root/.bashrc
- echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>/root/.bashrc
- - apt-get -y autoremove && apt-get -y clean all
+ - apt-get -y autoremove
+ - apt-get -y clean all
diff --git a/templates/cluster-templates/cluster-class.yaml b/templates/cluster-templates/cluster-class.yaml
index 8d7881df3..3373ae246 100644
--- a/templates/cluster-templates/cluster-class.yaml
+++ b/templates/cluster-templates/cluster-class.yaml
@@ -526,26 +526,34 @@ spec:
- apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- - modprobe overlay && modprobe br_netfilter && sysctl --system
+ - modprobe overlay
+ - modprobe br_netfilter
+ - sysctl --system
- wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz
- wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- sha256sum --check cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- tar --no-overwrite-dir -C / -xzf cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz
- rm -f cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- - chmod -R 644 /etc/cni && chown -R root:root /etc/cni
+ - chmod -R 644 /etc/cni
+ - chown -R root:root /etc/cni
- mkdir -p /etc/containerd
- containerd config default > /etc/containerd/config.toml
- sed -i "s/SystemdCgroup = false/SystemdCgroup = true/" /etc/containerd/config.toml
- - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd
+ - systemctl daemon-reload
+ - systemctl enable containerd
+ - systemctl start containerd
- mkdir -p /etc/apt/keyrings/
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- apt-get update
- - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet
+ - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion
+ - apt-mark hold kubelet kubectl kubeadm
+ - systemctl enable kubelet
- kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION
- echo 'source <(kubectl completion bash)' >>/root/.bashrc
- echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>/root/.bashrc
- - apt-get -y autoremove && apt-get -y clean all
+ - apt-get -y autoremove
+ - apt-get -y clean all
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
@@ -612,26 +620,34 @@ spec:
- apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- - modprobe overlay && modprobe br_netfilter && sysctl --system
+ - modprobe overlay
+ - modprobe br_netfilter
+ - sysctl --system
- wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz
- wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- sha256sum --check cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- tar --no-overwrite-dir -C / -xzf cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz
- rm -f cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- - chmod -R 644 /etc/cni && chown -R root:root /etc/cni
+ - chmod -R 644 /etc/cni
+ - chown -R root:root /etc/cni
- mkdir -p /etc/containerd
- containerd config default > /etc/containerd/config.toml
- sed -i "s/SystemdCgroup = false/SystemdCgroup = true/" /etc/containerd/config.toml
- - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd
+ - systemctl daemon-reload
+ - systemctl enable containerd
+ - systemctl start containerd
- mkdir -p /etc/apt/keyrings/
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- apt-get update
- - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet
+ - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion
+ - apt-mark hold kubelet kubectl kubeadm
+ - systemctl enable kubelet
- kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION
- echo 'source <(kubectl completion bash)' >>/root/.bashrc
- echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>/root/.bashrc
- - apt-get -y autoremove && apt-get -y clean all
+ - apt-get -y autoremove
+ - apt-get -y clean all
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: HetznerBareMetalRemediationTemplate
@@ -669,12 +685,19 @@ spec:
size: all
postInstallScript: |
#!/bin/bash
- mkdir -p /etc/cloud/cloud.cfg.d && touch /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
+ # Bash Strict Mode: https://github.com/guettli/bash-strict-mode
+ trap 'echo -e "\n🤷 🚨 🔥 Warning: A command has failed. Exiting the script. Line was ($0:$LINENO): $(sed -n "$LINENO"p "$0" 2>/dev/null || true) 🔥 🚨 🤷 "; exit 3' ERR
+ set -Eeuo pipefail
+
+ mkdir -p /etc/cloud/cloud.cfg.d
+ touch /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
echo "network: { config: disabled }" > /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
- apt-get update && apt-get install -y cloud-init apparmor apparmor-utils
+
+ apt-get update
+
+ apt-get install -y cloud-init apparmor apparmor-utils
cloud-init clean --logs
sshSpec:
- portAfterCloudInit: 22
portAfterInstallImage: 22
secretRef:
key:
@@ -747,23 +770,31 @@ spec:
- apt-get -y install at jq unzip wget socat mtr logrotate apt-transport-https
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- - modprobe overlay && modprobe br_netfilter && sysctl --system
+ - modprobe overlay
+ - modprobe br_netfilter
+ - sysctl --system
- wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz
- wget https://github.com/containerd/containerd/releases/download/v$CONTAINERD/cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- sha256sum --check cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- tar --no-overwrite-dir -C / -xzf cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz
- rm -f cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz cri-containerd-cni-$CONTAINERD-linux-amd64.tar.gz.sha256sum
- - chmod -R 644 /etc/cni && chown -R root:root /etc/cni
+ - chmod -R 644 /etc/cni
+ - chown -R root:root /etc/cni
- mkdir -p /etc/containerd
- containerd config default > /etc/containerd/config.toml
- sed -i "s/SystemdCgroup = false/SystemdCgroup = true/" /etc/containerd/config.toml
- - systemctl daemon-reload && systemctl enable containerd && systemctl start containerd
+ - systemctl daemon-reload
+ - systemctl enable containerd
+ - systemctl start containerd
- mkdir -p /etc/apt/keyrings/
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$TRIMMED_KUBERNETES_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- apt-get update
- - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion && apt-mark hold kubelet kubectl kubeadm && systemctl enable kubelet
+ - apt-get install -y kubelet="$KUBERNETES_VERSION-*" kubeadm="$KUBERNETES_VERSION-*" kubectl="$KUBERNETES_VERSION-*" bash-completion
+ - apt-mark hold kubelet kubectl kubeadm
+ - systemctl enable kubelet
- kubeadm config images pull --kubernetes-version $KUBERNETES_VERSION
- echo 'source <(kubectl completion bash)' >>/root/.bashrc
- echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>/root/.bashrc
- - apt-get -y autoremove && apt-get -y clean all
+ - apt-get -y autoremove
+ - apt-get -y clean all
diff --git a/test/e2e/Makefile b/test/e2e/Makefile
index c28600779..f130b4e9e 100644
--- a/test/e2e/Makefile
+++ b/test/e2e/Makefile
@@ -58,27 +58,32 @@ e2e-cilium-templates:
# Missing: ["BIN_PATH"]
helm repo add cilium https://helm.cilium.io/
helm repo update cilium
- helm template cilium cilium/cilium --version 1.17.2 \
- --namespace kube-system \
- -f $(REPO_ROOT)/templates/cilium/cilium.yaml > $(REPO_ROOT)/test/e2e/data/cni/cilium/cilium.yaml
+ helm template cilium cilium/cilium --version 1.18.1 \
+ --namespace kube-system \
+ -f $(REPO_ROOT)/templates/cilium/cilium.yaml \
+ > $(REPO_ROOT)/test/e2e/data/cni/cilium/cilium.yaml
+
sed -i 's/$${BIN_PATH}/$$BIN_PATH/' $(REPO_ROOT)/test/e2e/data/cni/cilium/cilium.yaml
sed -i '1s/^/# Created by `make e2e-cilium-templates`\n/' $(REPO_ROOT)/test/e2e/data/cni/cilium/cilium.yaml
e2e-ccm-templates:
helm repo add syself https://charts.syself.com
+
helm template ccm syself/ccm-hcloud --version 1.0.11 \
--namespace kube-system \
--set pdb.enabled=false \
--set secret.name=hetzner \
--set secret.tokenKeyName=hcloud \
--set privateNetwork.enabled=false > $(REPO_ROOT)/test/e2e/data/ccm/hcloud-ccm.yaml
+
helm template ccm syself/ccm-hcloud --version 1.0.11 \
--namespace kube-system \
--set pdb.enabled=false \
--set secret.name=hetzner \
--set secret.tokenKeyName=hcloud \
--set privateNetwork.enabled=true > $(REPO_ROOT)/test/e2e/data/ccm/hcloud-ccm-network.yaml
- helm template ccm syself/ccm-hetzner --version 1.1.4 \
+
+ helm template ccm syself/ccm-hetzner --version 2.0.1 \
--namespace kube-system \
--set pdb.enabled=false \
--set privateNetwork.enabled=false > $(REPO_ROOT)/test/e2e/data/ccm/hcloud-ccm-hetzner.yaml
diff --git a/test/e2e/caph.go b/test/e2e/caph.go
index b5bec6e0d..4d6a8b697 100644
--- a/test/e2e/caph.go
+++ b/test/e2e/caph.go
@@ -62,7 +62,7 @@ func CaphClusterDeploymentSpec(ctx context.Context, inputGetter func() CaphClust
gomega.Expect(input.BootstrapClusterProxy).ToNot(gomega.BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
gomega.Expect(os.MkdirAll(input.ArtifactFolder, 0o750)).To(gomega.Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
gomega.Expect(input.E2EConfig.Variables).To(gomega.HaveKey(KubernetesVersion))
- gomega.Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion)))
+ gomega.Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariableOrEmpty(KubernetesVersion)))
// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
@@ -83,7 +83,7 @@ func CaphClusterDeploymentSpec(ctx context.Context, inputGetter func() CaphClust
Flavor: input.Flavor,
Namespace: namespace.Name,
ClusterName: clusterName,
- KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion),
+ KubernetesVersion: input.E2EConfig.GetVariableOrEmpty(KubernetesVersion),
ControlPlaneMachineCount: ptr.To(input.ControlPlaneMachineCount),
WorkerMachineCount: ptr.To(input.WorkerMachineCount),
},
@@ -97,6 +97,7 @@ func CaphClusterDeploymentSpec(ctx context.Context, inputGetter func() CaphClust
ginkgo.AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup, input.BootstrapClusterProxy.GetKubeconfigPath(),
+ input.ClusterctlConfigPath)
})
}
diff --git a/test/e2e/common.go b/test/e2e/common.go
index a5c1038a8..6285ae734 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -37,6 +37,13 @@ const (
KubernetesVersion = "KUBERNETES_VERSION"
CiliumPath = "CILIUM"
CiliumResources = "CILIUM_RESOURCES"
+
+ // TODO: We should clean up here.
+ // We only support the syself ccm.
+ // To make this clear, we should use the term "syself".
+ // Currently (in this context) "hetzner" means the syself-ccm,
+ // and "hcloud" means the hcloud ccm (which now supports bare-metal, too)
+ // Nevertheless, the hcloud/hetzner ccm is not supported.
CCMPath = "CCM"
CCMResources = "CCM_RESOURCES"
CCMNetworkPath = "CCM_NETWORK"
@@ -62,7 +69,7 @@ func setupSpecNamespace(ctx context.Context, specName string, clusterProxy frame
return namespace, cancelWatches
}
-func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) {
+func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool, kubeConfigPath, clusterctlConfigPath string) {
var clusterName string
var clusterNamespace string
if cluster != nil {
@@ -77,9 +84,11 @@ func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr
// Dump all Cluster API related resources to artifacts before deleting them.
framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
- Lister: clusterProxy.GetClient(),
- Namespace: namespace.Name,
- LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
+ Lister: clusterProxy.GetClient(),
+ Namespace: namespace.Name,
+ LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
+ KubeConfigPath: kubeConfigPath,
+ ClusterctlConfigPath: clusterctlConfigPath,
})
} else {
clusterName = "empty"
@@ -92,8 +101,9 @@ func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr
// that cluster variable is not set even if the cluster exists, so we are calling DeleteAllClustersAndWait
// instead of DeleteClusterAndWait
framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
- Client: clusterProxy.GetClient(),
- Namespace: namespace.Name,
+ ClusterProxy: clusterProxy,
+ Namespace: namespace.Name,
+ ClusterctlConfigPath: clusterctlConfigPath,
}, intervalsGetter(specName, "wait-delete-cluster")...)
Byf("Deleting namespace used for hosting the %q test spec", specName)
diff --git a/test/e2e/data/ccm/hcloud-ccm-hetzner.yaml b/test/e2e/data/ccm/hcloud-ccm-hetzner.yaml
index 7274b5ab5..2525f87dd 100644
--- a/test/e2e/data/ccm/hcloud-ccm-hetzner.yaml
+++ b/test/e2e/data/ccm/hcloud-ccm-hetzner.yaml
@@ -6,11 +6,11 @@ metadata:
name: ccm-ccm-hetzner
namespace: kube-system
labels:
- helm.sh/chart: ccm-hetzner-1.1.10
+ helm.sh/chart: ccm-hetzner-2.0.1
app: ccm
app.kubernetes.io/name: ccm-hetzner
app.kubernetes.io/instance: ccm
- app.kubernetes.io/version: "v1.18.0-0.0.5"
+ app.kubernetes.io/version: "v2.0.1"
app.kubernetes.io/managed-by: Helm
---
# Source: ccm-hetzner/templates/serviceaccount.yaml
@@ -20,11 +20,11 @@ metadata:
name: ccm-ccm-hetzner
namespace: kube-system
labels:
- helm.sh/chart: ccm-hetzner-1.1.10
+ helm.sh/chart: ccm-hetzner-2.0.1
app: ccm
app.kubernetes.io/name: ccm-hetzner
app.kubernetes.io/instance: ccm
- app.kubernetes.io/version: "v1.18.0-0.0.5"
+ app.kubernetes.io/version: "v2.0.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -42,11 +42,11 @@ metadata:
name: ccm-ccm-hetzner
namespace: kube-system
labels:
- helm.sh/chart: ccm-hetzner-1.1.10
+ helm.sh/chart: ccm-hetzner-2.0.1
app: ccm
app.kubernetes.io/name: ccm-hetzner
app.kubernetes.io/instance: ccm
- app.kubernetes.io/version: "v1.18.0-0.0.5"
+ app.kubernetes.io/version: "v2.0.1"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
@@ -63,7 +63,12 @@ spec:
spec:
dnsPolicy: Default
serviceAccountName: ccm-ccm-hetzner
- securityContext: {}
+ securityContext:
+ {}
+ volumes:
+ - name: hetzner-secret
+ secret:
+ secretName: hetzner
tolerations:
# Introduced with CAPI v1.4, more info: https://cluster-api.sigs.k8s.io/developer/providers/bootstrap.html#taint-nodes-at-creation
- key: "node.cluster.x-k8s.io/uninitialized"
@@ -85,8 +90,9 @@ spec:
effect: "NoSchedule"
containers:
- name: ccm-hetzner
- securityContext: {}
- image: "ghcr.io/syself/hetzner-cloud-controller-manager:v1.18.0-0.0.5"
+ securityContext:
+ {}
+ image: "ghcr.io/syself/hetzner-cloud-controller-manager:v2.0.1"
imagePullPolicy: Always
command:
- "/bin/hetzner-cloud-controller-manager"
@@ -97,26 +103,11 @@ spec:
requests:
cpu: 100m
memory: 50Mi
+ volumeMounts:
+ - mountPath: /etc/hetzner-secret
+ name: hetzner-secret
+ readOnly: true
env:
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: HCLOUD_TOKEN
- valueFrom:
- secretKeyRef:
- name: hetzner
- key: hcloud
- - name: ROBOT_USER_NAME
- valueFrom:
- secretKeyRef:
- name: hetzner
- key: robot-user
- - name: ROBOT_PASSWORD
- valueFrom:
- secretKeyRef:
- name: hetzner
- key: robot-password
- name: HCLOUD_DEBUG
value: "false"
- name: HCLOUD_LOAD_BALANCERS_ENABLED
diff --git a/test/e2e/data/ccm/hcloud-ccm-network.yaml b/test/e2e/data/ccm/hcloud-ccm-network.yaml
index 3302181de..592424213 100644
--- a/test/e2e/data/ccm/hcloud-ccm-network.yaml
+++ b/test/e2e/data/ccm/hcloud-ccm-network.yaml
@@ -6,11 +6,11 @@ metadata:
name: ccm-ccm-hcloud
namespace: kube-system
labels:
- helm.sh/chart: ccm-hetzner-1.1.10
+ helm.sh/chart: ccm-hcloud-1.0.11
app: ccm
app.kubernetes.io/name: ccm-hcloud
app.kubernetes.io/instance: ccm
- app.kubernetes.io/version: "v1.18.0-0.0.5"
+ app.kubernetes.io/version: "1.13.0"
app.kubernetes.io/managed-by: Helm
---
# Source: ccm-hcloud/templates/serviceaccount.yaml
@@ -20,11 +20,11 @@ metadata:
name: ccm-ccm-hcloud
namespace: kube-system
labels:
- helm.sh/chart: ccm-hetzner-1.1.10
+ helm.sh/chart: ccm-hcloud-1.0.11
app: ccm
app.kubernetes.io/name: ccm-hcloud
app.kubernetes.io/instance: ccm
- app.kubernetes.io/version: "v1.18.0-0.0.5"
+ app.kubernetes.io/version: "1.13.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -42,11 +42,11 @@ metadata:
name: ccm-ccm-hcloud
namespace: kube-system
labels:
- helm.sh/chart: ccm-hetzner-1.1.10
+ helm.sh/chart: ccm-hcloud-1.0.11
app: ccm
app.kubernetes.io/name: ccm-hcloud
app.kubernetes.io/instance: ccm
- app.kubernetes.io/version: "v1.18.0-0.0.5"
+ app.kubernetes.io/version: "1.13.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
@@ -63,7 +63,8 @@ spec:
spec:
dnsPolicy: Default
serviceAccountName: ccm-ccm-hcloud
- securityContext: {}
+ securityContext:
+ {}
tolerations:
# this taint is set by all kubelets running `--cloud-provider=external`
# so we should tolerate it to schedule the cloud controller manager
@@ -84,11 +85,12 @@ spec:
hostNetwork: true
containers:
- name: ccm-hcloud
- securityContext: {}
- image: "ghcr.io/syself/hetzner-cloud-controller-manager:v1.18.0-0.0.5"
+ securityContext:
+ {}
+ image: "docker.io/hetznercloud/hcloud-cloud-controller-manager:v1.13.0"
imagePullPolicy: Always
command:
- - "/bin/hetzner-cloud-controller-manager"
+ - "/bin/hcloud-cloud-controller-manager"
- "--cloud-provider=hcloud"
- "--leader-elect=true"
- "--allow-untagged-cloud"
@@ -108,16 +110,6 @@ spec:
secretKeyRef:
name: hetzner
key: hcloud
- - name: ROBOT_USER_NAME
- valueFrom:
- secretKeyRef:
- name: hetzner
- key: robot-user
- - name: ROBOT_PASSWORD
- valueFrom:
- secretKeyRef:
- name: hetzner
- key: robot-password
- name: HCLOUD_DEBUG
value: "false"
- name: HCLOUD_LOAD_BALANCERS_ENABLED
diff --git a/test/e2e/data/cni/cilium/cilium.yaml b/test/e2e/data/cni/cilium/cilium.yaml
index fcce09e84..6833b86ad 100644
--- a/test/e2e/data/cni/cilium/cilium.yaml
+++ b/test/e2e/data/cni/cilium/cilium.yaml
@@ -7,6 +7,7 @@ metadata:
name: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
+ annotations:
---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
@@ -29,55 +30,6 @@ metadata:
name: "cilium-operator"
namespace: kube-system
---
-# Source: cilium/templates/hubble-relay/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: "hubble-relay"
- namespace: kube-system
-automountServiceAccountToken: false
----
-# Source: cilium/templates/hubble-ui/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: "hubble-ui"
- namespace: kube-system
----
-# Source: cilium/templates/cilium-ca-secret.yaml
-apiVersion: v1
-kind: Secret
-metadata:
- name: cilium-ca
- namespace: kube-system
-data:
- ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRUlQyYWl1VkhSaXcwbitVbEwzbDdIekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV3TXpJM01EZ3pPREEwV2hjTk1qZ3dNekkyTURnegpPREEwV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQy9PVDkzR0hFWG5WSkYxYk9JNTRvVC8zZnY4Z3Vicm5BV0t5bmNLVTBBTjM5YUhTSEUKc0g2NmpLR0NDdTE1cUxRSmRXWld3aEhLcHpaTlNHK2dZV2pUN2dyVWd2bkRyNzV5K2M4OWRSRTR3Q1lmOTh4RApRMzdNRnNadUxsL2hTeWxVU1pUM0NUMXhpekhpZkZaeFNHdXRGNE1KZUxXUXpFaENQR0F2WVEraGwxSitrMEliCngzOFVQWlZGWVpYR3kvOHRhR2poUzFyL2JwWVBCV283ODhIQUR1Wmh6OThEYmx6Y084N0kyY21RL3EzRitNNTMKaGZad0d6TDlsU0p5UjdsS0lVWVoxQURjbTFrNzJLengxTEhTcjhmRnA3bTdHbFc0TmNQYXZ1RmFvdm1ZTVMyRwpMODljdzdhZVh4MkZ2WmQwSSs0bURrWjR3dEJaOWUzeWpoQTNBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVVQL1BIU0oxVDIvdWdxUDBseitWNko1N09rcmN3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKL2t6VkU5ODVubnF3bzZGVDZyRkpTZFVHd3dEL2R4SHFrVVFDWmplUDB1VjlLdEpnUEdiL3NUClRPNjloZUtjTlFlSW9SK2pEUmt0T1E2dHBmS3hMeUx6dmR2WE5QNWFpTXpFZ3QzbHVCSjFHcWU2Rldxd3Q3KzQKcGJOSTh6VldySGZrWnljMnYxZGxOUTZSQ0c1VThUVHpJbnlxdmxtTjNLOEJOb004a2pmTEJXc1Z0UXdIRzlTVgpKMGViWk5OK3FDc0xWM2JJdUpoUk5HK1d1UVBQVlU2NnZkVDRxOEdvcGFNMytyZzlCaXRjRXFjNjE0R3p4NkI2Cm9TbTZUSGhSRnFUUzhCcTlPRmQ1ekdmOTJRRy80TTl4YWZVRTA2Q3Z1VExmMVY1U2tHY0FPR3Z6Ri9lTXNITW8KbXJ6d1NrUlRhNW9DTkhRSkd6WVF2N3VSWWwvTG45ST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
- ca.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdnprL2R4aHhGNTFTUmRXemlPZUtFLzkzNy9JTG02NXdGaXNwM0NsTkFEZC9XaDBoCnhMQit1b3loZ2dydGVhaTBDWFZtVnNJUnlxYzJUVWh2b0dGbzArNEsxSUw1dzYrK2N2blBQWFVST01BbUgvZk0KUTBOK3pCYkdiaTVmNFVzcFZFbVU5d2s5Y1lzeDRueFdjVWhyclJlRENYaTFrTXhJUWp4Z0wyRVBvWmRTZnBOQwpHOGQvRkQyVlJXR1Z4c3YvTFdobzRVdGEvMjZXRHdWcU8vUEJ3QTdtWWMvZkEyNWMzRHZPeU5uSmtQNnR4ZmpPCmQ0WDJjQnN5L1pVaWNrZTVTaUZHR2RRQTNKdFpPOWlzOGRTeDBxL0h4YWU1dXhwVnVEWEQycjdoV3FMNW1ERXQKaGkvUFhNTzJubDhkaGIyWGRDUHVKZzVHZU1MUVdmWHQ4bzRRTndJREFRQUJBb0lCQVFDRVZMQzMwVUZLTDBQbApNZ3NwWGRvdkZxYkFkUWRWZURUM0VtM0lmd2hiY3d4OXB4VmNVU3VTRzRvVlBVeXoyV3lsOHdtY1ZuY0xnQVhUCkc2S3NJVnp5OUpPb0Z1K1RYVVoxdUt5VEFqV2x5VXRZNmhvQUhuQmIraExkNHRmVlNEZFVyZ3NabmRwWWZoUksKZ1BNdE9BblQ0aVo3WW41N3N2TDZhMS9BeWZHUVI3NlRONVBHZHBLZ3NpV04wWEVqZEJFLzdOWW0ya3JTZzI3ZgorQ3dzU0lDVmo1ZDU4V244Vll6NlN5MW9rM3JnSHk3Zy9rQzBDUlRSQW1hd3QzeGsvYWlMbDZ6OUtFNXloWktrCjJLLzdoQ2RGbFhtVnBSVGszT2JRMWRjVHpMT0FhL09xeEdvQTg1S2YyRkNoMmwvS2QycG5pZ3k1SlJhdnpaWHEKSFl4RWR6WXBBb0dCQU9VakMzK2hKUU1IMEFOcHFBVHVpSjlOTmpXMTh1Zms0a2NZNU56N2xMQmZxUC9uelY3RQoxeTZsNi9DdkJJSFd2WmxVVUIyOWVFWktpWW1BQVllbjIrdHNuNVNGSXFDL0gwUHFNQW1XMkJnT213bG1GcDkwCnZhdktrMmRvQlpMWk5od25sMGxSSDQrVS9LTGxMTEhZbUFDamFqRUtvUU1MazZlYXJ2RjlUeFlsQW9HQkFOV2sKVldqby93UWlCL3pxQjBDNFdtRERFemRYcWhzOE15Wk9ncHZ2cDcrbDNCQjVvZGZTY1VPUG1tcnBxYU9md2hJTApSVWQ0V0lUK1Fqb24xQXhzNE5wNVViWVJ1WmN3eTNmWDRVdEZKeEQxZ2JUTW83VzZURHArYlJJampjWVRIQzR4CmR6azViZHliblZPcUpJcmk1TVhWend1dDA1UWJBNXRoTGRUbEZIZ3JBb0dBQysrMWE2S2pZUE1KUFFSR0FtdkMKcGpuSmR5YlJUbzgzMG1oVGs4ZHZTWGpTUHZKK1drOTZ2aTFhaXlFeTVPaWcyOW51aW40U1ErY2lIdzJTQnRVUgp1Y1BnMUVYVE5maCswblRpcDcwNTZKYnFNS1Vtekl1eVRiNnh5eE02SnhvMW5XWGFsYkVxS2JxakpvRTF3Tlg5CnBScmVMMHZVYzJvbytQUXY1TmlZUkJrQ2dZQkRPbTVDRUNmSGxpbnJWWDJveVJwdm5wTmFjL1NzazFvSk1PVjYKeW9TQUUySlhzTjdUK24zMzQ2V1ZCU2lEdTg4blBISzFsZEpOTDNHSnVXVEo4QzBsc3MyYXZLTXBSNHFQa05odApoanBQNU9ISEVpcG1FbFRRaDE0dE9oRHdCcTEwZjg1anJJVzh5UjlONTVtT051NEVyMDV5LzJVNEV2UWtuZ1Q5CkhJVS9kd0tCZ0Zpb1lqTDhWbDF5ZVFSZStjN0JZaktSd2greVdzYWEvMFFwWUFKdFVmaXV5MjZyb2FjMFJFS1UKS2hGUE9hTDJCWTBOMnVlNjhXcDc3TkFVZll5QlZ5amZtZ0JEOTFUODVpZ091aXdSclAzMEliYmsrNnR2eGNEVgp6QzIzVTFmb1diOC9rQjl3QVhwNHRDVWw3Um1sQy9zUlV0MkNYSFNmOW9LMXEvYnJwUFJLCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
----
-# Source: cilium/templates/hubble/tls-helm/relay-client-secret.yaml
-apiVersion: v1
-kind: Secret
-metadata:
- name: hubble-relay-client-certs
- namespace: kube-system
-type: kubernetes.io/tls
-data:
- ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRUlQyYWl1VkhSaXcwbitVbEwzbDdIekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV3TXpJM01EZ3pPREEwV2hjTk1qZ3dNekkyTURnegpPREEwV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQy9PVDkzR0hFWG5WSkYxYk9JNTRvVC8zZnY4Z3Vicm5BV0t5bmNLVTBBTjM5YUhTSEUKc0g2NmpLR0NDdTE1cUxRSmRXWld3aEhLcHpaTlNHK2dZV2pUN2dyVWd2bkRyNzV5K2M4OWRSRTR3Q1lmOTh4RApRMzdNRnNadUxsL2hTeWxVU1pUM0NUMXhpekhpZkZaeFNHdXRGNE1KZUxXUXpFaENQR0F2WVEraGwxSitrMEliCngzOFVQWlZGWVpYR3kvOHRhR2poUzFyL2JwWVBCV283ODhIQUR1Wmh6OThEYmx6Y084N0kyY21RL3EzRitNNTMKaGZad0d6TDlsU0p5UjdsS0lVWVoxQURjbTFrNzJLengxTEhTcjhmRnA3bTdHbFc0TmNQYXZ1RmFvdm1ZTVMyRwpMODljdzdhZVh4MkZ2WmQwSSs0bURrWjR3dEJaOWUzeWpoQTNBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVVQL1BIU0oxVDIvdWdxUDBseitWNko1N09rcmN3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKL2t6VkU5ODVubnF3bzZGVDZyRkpTZFVHd3dEL2R4SHFrVVFDWmplUDB1VjlLdEpnUEdiL3NUClRPNjloZUtjTlFlSW9SK2pEUmt0T1E2dHBmS3hMeUx6dmR2WE5QNWFpTXpFZ3QzbHVCSjFHcWU2Rldxd3Q3KzQKcGJOSTh6VldySGZrWnljMnYxZGxOUTZSQ0c1VThUVHpJbnlxdmxtTjNLOEJOb004a2pmTEJXc1Z0UXdIRzlTVgpKMGViWk5OK3FDc0xWM2JJdUpoUk5HK1d1UVBQVlU2NnZkVDRxOEdvcGFNMytyZzlCaXRjRXFjNjE0R3p4NkI2Cm9TbTZUSGhSRnFUUzhCcTlPRmQ1ekdmOTJRRy80TTl4YWZVRTA2Q3Z1VExmMVY1U2tHY0FPR3Z6Ri9lTXNITW8KbXJ6d1NrUlRhNW9DTkhRSkd6WVF2N3VSWWwvTG45ST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
- tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTVENDQWpHZ0F3SUJBZ0lSQUt2ZTNTYlNpbkw4S3NCd3cycGp5SDR3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURNeU56QTRNemd3TkZvWERUSTJNRE15TnpBNApNemd3TkZvd0l6RWhNQjhHQTFVRUF3d1lLaTVvZFdKaWJHVXRjbVZzWVhrdVkybHNhWFZ0TG1sdk1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTNnbnlLdG42Zmw0aW5kZEJ1K25NMVRyRnlwWEMKdDFBK2JaUkdBU1hocklpVHlmZWFxQkdqQTBxSjh2Y2VqVlFvT01tdllpbDlmWFRnTlp2SUJzUEhLOXF4M0RvcApmeTRMaWlPcWhBZHNaZXJXZERzU2lvYWgraXhYb1l0SUxzMzU2elB6R3FPTW5OSWRncE5BbS9xWFZ4VUtldmFrCmZnZnVMTDh0dGhVV1FjNUJwelQ0bzlwQmxuaUs4VjE5WmE5djZ3RmRJQmVOVzB1QzRmRFJRbnpINzViVHIvakcKU2lFOGQ0Rm9xbjd5L1FpMTdUN052SjZIVHVUTnU4SzM3SEo5U0I2SEFPTXFtaTNKMFRXSkduVDBaV3RrSEVObworMzFSYWFyUFRmRm5LSUtlOHptRmQ4MUtxVFY5c25JaGJHcktRMFBXNDI5NEEwS3hXRmVvbVRweER3SURBUUFCCm80R0dNSUdETUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUIKQlFVSEF3SXdEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCUS84OGRJblZQYis2Q28vU1hQNVhvbgpuczZTdHpBakJnTlZIUkVFSERBYWdoZ3FMbWgxWW1Kc1pTMXlaV3hoZVM1amFXeHBkVzB1YVc4d0RRWUpLb1pJCmh2Y05BUUVMQlFBRGdnRUJBSENvZ1FnQWV2TmtwSkh2WWRBUE9QM3g3WTNpaFVaRThBdVY0RERPUkJVUmpCZjEKekkrODA2TEpsTDFMeXZmWGdnY2pEM1YrTlNrWHhlWis1Y0gvV2p1SXl2Qm5JT2dYM2R2L3pCekRKSlJOdFg1ZQpmQzJIdDZOckJETlRmM2NOTVNaUVEyMHZjbmxIV1BEUG5uUlQzaDBwUWNZbFpKa3lkU1g2Q1ZzdS8rRFdkTnR1ClpMdmZPTkZDOW0rMXNJcGJxbkpneW9UVjQ1UzBPbTdYcGtyaUM4K2MwbUpQek9ZOVd5VnE5VHFPZ1lJRS9aV2QKNUc3K2VqTWJoOGVFaWtmclMvb3hrM2M3MG5FaU5wRWxvUmg5YjkreXo3RWtZMlo2K3dMSTEyaXEwb0VoVHBGbgpIMWF0TVJGRjErbHdrNnFwbjNYODV0Wm80eDVDTy9jNWU4TDJ6SVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
- tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBM2dueUt0bjZmbDRpbmRkQnUrbk0xVHJGeXBYQ3QxQStiWlJHQVNYaHJJaVR5ZmVhCnFCR2pBMHFKOHZjZWpWUW9PTW12WWlsOWZYVGdOWnZJQnNQSEs5cXgzRG9wZnk0TGlpT3FoQWRzWmVyV2REc1MKaW9haCtpeFhvWXRJTHMzNTZ6UHpHcU9Nbk5JZGdwTkFtL3FYVnhVS2V2YWtmZ2Z1TEw4dHRoVVdRYzVCcHpUNApvOXBCbG5pSzhWMTlaYTl2NndGZElCZU5XMHVDNGZEUlFuekg3NWJUci9qR1NpRThkNEZvcW43eS9RaTE3VDdOCnZKNkhUdVROdThLMzdISjlTQjZIQU9NcW1pM0owVFdKR25UMFpXdGtIRU5vKzMxUmFhclBUZkZuS0lLZTh6bUYKZDgxS3FUVjlzbkloYkdyS1EwUFc0Mjk0QTBLeFdGZW9tVHB4RHdJREFRQUJBb0lCQUdueFNQRVZUS1lhK0YxagpwZU9wYktpWkFHTG5EaDgxVkhRb2VUWjNBclMxL2Y0eC9FTVJGenlzeGlGOXM4QXZWWG16RWRHK2hqZmtHazY1CjQ3a0dsUFBoeDFlVEVOV3ovS0dPM3Y5Y0kyYUZwSW1VM2dYbXMwYVEvY2NBbEJKeENUd21iMnBZZXZFZ2VDQWoKYm9KK0VOUG1JRGdIa3Zqbmc4Y3ArUCtXRkNhVkJVb3dTdFA2Y1c1Njk1ZGFPRzY5ajZwWWsyUjdPZFovZUlySgpUU1dxU2VGWlRCZkphV1NlV1JXbkxMelM3R0Uxc042NFQvOU1jSWJaS2VKaGdWSm9zNkFIRldsdmlDcCsxcmtMCmFBeEthaGxKKzZ1bUtweDFCVmVVVGMycENPMTc0MVN3SGk1cWJjTUlUZW1jY1FvdzZDaXptS3o2ckh3TTRXYkIKaGRFSU1sRUNnWUVBM3VSVnoxVGZvTlhTWkRDZCtLRTFZdVlRU2Mxa29odkM5L0pJN2pvbEJVVCswVjZQd0U3TgpjQVAwWVNXejRRVnJaOEJtSVUwTVRMRXhYNERrM1FnWU5rTHNKTEUxNmYyclRtQm5YK25OUUIydHJsWVRtd0RoCkNzYldERktXOUhZa0pWK1A2Mk1jaVYzMDVXSVpYM3k5elByazg2TVFFbjhuaGwxeHRHankxS3NDZ1lFQS93VXIKNlgrcDJJZVJVdHhPYVg5cHpoU0xMMmxPUUgyeFl0b0hGVUkySWdkYlJKS2hTaHladDB2NVZrWitxVmJnQXF4cgpUeGZ6OHBRUE51eUdPZlVXRUtkOFArUE0zM2VRRkIvclI4dHVJYmtvNWs3c1lGWTAraDRnYVpENzZ6di9pTms4CjlObnJ2MVZqRWVOUUFqOEo0ODhIUzJIZzBUY1M2NDFScjl0N0xTMENnWUVBc3BjanhrZE1FNU9meXhWRlRmVGwKVU1Qa0l5NUdScm1WdzVOSEtsSStYWFdGOHY0Z2trYlJ4c0gwYW02S0xXOXBPcTFuWjZGVGpKT2hhdE5kRFZreQpCVEFrQTlaVnk1R3NVOVZjbnZZM3RmclJzZEZZVGljcktiWmR4V3ZiZGk2L0VZNzdRdkNiNmRqMW8yR3gwTmVqClFIMTdPMWZaUmVqS0k1ZjF4a2NjNHBjQ2dZQm9aZWlWSFVNZHRsbmlydmVXcnFkVXl3b3pyWXR3VVVSbDhGV0gKK1dRSnlETXZ2a3N5K01SZ3lrUlhjbUoxMW0rOENIT2huRytzZGJwSXhNa3FQcy82bjJYNURidVlHKzBxbitxRQpxSjM4UmoyZEV1QW5qQU1DQllWRlVpb2ZJWis1bVIwZ0szb2NDTXVXemhTNTVxNll5TzcyaUNvdDI3cnVIUnM4CnY4QU1QUUtCZ1FEQTlMUXBENlhLbHVkaVE5Y1F6VHV5TTRjN1lRRE1MSG82dWltL1A5cWFsTFpuRzhPaEl0QXgKbDk0YVJwNHpmTzRza0RyeG1NdURHNEtlNXlpdTlFSXYxeFFTdmxCN3lhWXVPMER6S2NSR2ZtVEkvQ3lRZzllTApKMXBsM051OVNSZGh5T1p1dkpFY3dwU1p0dTdIdGJOcEYvOGFpRnp3VGlZNmhKK2FBa1Foc1E9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
----
-# Source: cilium/templates/hubble/tls-helm/server-secret.yaml
-apiVersion: v1
-kind: Secret
-metadata:
- name: hubble-server-certs
- namespace: kube-system
-type: kubernetes.io/tls
-data:
- ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRUlQyYWl1VkhSaXcwbitVbEwzbDdIekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV3TXpJM01EZ3pPREEwV2hjTk1qZ3dNekkyTURnegpPREEwV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQy9PVDkzR0hFWG5WSkYxYk9JNTRvVC8zZnY4Z3Vicm5BV0t5bmNLVTBBTjM5YUhTSEUKc0g2NmpLR0NDdTE1cUxRSmRXWld3aEhLcHpaTlNHK2dZV2pUN2dyVWd2bkRyNzV5K2M4OWRSRTR3Q1lmOTh4RApRMzdNRnNadUxsL2hTeWxVU1pUM0NUMXhpekhpZkZaeFNHdXRGNE1KZUxXUXpFaENQR0F2WVEraGwxSitrMEliCngzOFVQWlZGWVpYR3kvOHRhR2poUzFyL2JwWVBCV283ODhIQUR1Wmh6OThEYmx6Y084N0kyY21RL3EzRitNNTMKaGZad0d6TDlsU0p5UjdsS0lVWVoxQURjbTFrNzJLengxTEhTcjhmRnA3bTdHbFc0TmNQYXZ1RmFvdm1ZTVMyRwpMODljdzdhZVh4MkZ2WmQwSSs0bURrWjR3dEJaOWUzeWpoQTNBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVVQL1BIU0oxVDIvdWdxUDBseitWNko1N09rcmN3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKL2t6VkU5ODVubnF3bzZGVDZyRkpTZFVHd3dEL2R4SHFrVVFDWmplUDB1VjlLdEpnUEdiL3NUClRPNjloZUtjTlFlSW9SK2pEUmt0T1E2dHBmS3hMeUx6dmR2WE5QNWFpTXpFZ3QzbHVCSjFHcWU2Rldxd3Q3KzQKcGJOSTh6VldySGZrWnljMnYxZGxOUTZSQ0c1VThUVHpJbnlxdmxtTjNLOEJOb004a2pmTEJXc1Z0UXdIRzlTVgpKMGViWk5OK3FDc0xWM2JJdUpoUk5HK1d1UVBQVlU2NnZkVDRxOEdvcGFNMytyZzlCaXRjRXFjNjE0R3p4NkI2Cm9TbTZUSGhSRnFUUzhCcTlPRmQ1ekdmOTJRRy80TTl4YWZVRTA2Q3Z1VExmMVY1U2tHY0FPR3Z6Ri9lTXNITW8KbXJ6d1NrUlRhNW9DTkhRSkd6WVF2N3VSWWwvTG45ST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
- tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURWakNDQWo2Z0F3SUJBZ0lRQTVCamtGUzh6NWtOclNiZDNDNVZ1akFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV3TXpJM01EZ3pPREEwV2hjTk1qWXdNekkzTURnegpPREEwV2pBcU1TZ3dKZ1lEVlFRRERCOHFMbVJsWm1GMWJIUXVhSFZpWW14bExXZHljR011WTJsc2FYVnRMbWx2Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBb05tZkxvd2VPWmJyMVhKRjBIOXYKZS9SREVsSllwcUthUnV6SFBJRXhENU1QM0c1ZjJvS0hCenhpWjdBejMxcHZnNCt3ZnhjTml4M283bXd6TkFJOApFaGhKalRNZ25RTGJSdWNXZWNmVHJGb3drYytNM3RLb0U0VlhrM0NBNkhzdHhudjF1YWRiOFJSdm1sd0l0NGsxCnd5MWErWnVsaXcwbHFOT1ZpYitxaFNOeUJjelNnV0VLL3VjTkVkL2ZBNnRBcDFpMndDWHNSTDZaQmNnaFVGOGYKRUZhVkdGdFBHRUtsWG05Syt0b1QvWHBtZjFVNHlUUW93UkpPZ2xYK3pxSU56U21oemNhbDVwUURENU9vaEpidwpkcHZRd0t0Szg0anBuOVNmSkZRcFZuUzc3ZHk1YmNTUmx5aVFUSXkyYzNWa0lrZFU3YUd0YlNuOEkvamR2U3BKCm9RSURBUUFCbzRHTk1JR0tNQTRHQTFVZER3RUIvd1FFQXdJRm9EQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0QKQVFZSUt3WUJCUVVIQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JRLzg4ZEluVlBiKzZDbwovU1hQNVhvbm5zNlN0ekFxQmdOVkhSRUVJekFoZ2g4cUxtUmxabUYxYkhRdWFIVmlZbXhsTFdkeWNHTXVZMmxzCmFYVnRMbWx2TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDQkQ5YW8wL2VodE5NaHJ2QWgvOW02amRFdHlUUlcKc0ZXVW9jZSszZWYrMm1TOUVUdW84VmE0MEovSThraStZT0Q4TE9nOEwwcmo0dTJBVitpUC91VW5VQkVjTzg1cQo2WUZCMWNPVytYdUg3bS9WR0dBcGlHRkt0Q3doODBhNk5BMzRXdmI3MGw4WHByRzRabDQvVlk2Q3hmb1ZmZTVDClpzTXMrUEJ5UmQ4ZEF1WVM4M3pDdWkrM1hEOXRyNFZyVHJmQmhUYmpRZmpTbHI1RnpybXpVTVdTRlFlenJtYWYKeThFblJzYVNZWEhJZHlTZWxmL2d3dlk4NGhMejdqVUthNEF2SUxaRUNYREorUkV4c3Vkb3JPc0cybmVQOVd6LwpiZEdYL3N6TDlteEV3TU8xSVU3aUdvNDFhOUU5Q1hQeU9jbGtydmF3WlM2TmwvOEpJSy9vU09LRgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBb05tZkxvd2VPWmJyMVhKRjBIOXZlL1JERWxKWXBxS2FSdXpIUElFeEQ1TVAzRzVmCjJvS0hCenhpWjdBejMxcHZnNCt3ZnhjTml4M283bXd6TkFJOEVoaEpqVE1nblFMYlJ1Y1dlY2ZUckZvd2tjK00KM3RLb0U0VlhrM0NBNkhzdHhudjF1YWRiOFJSdm1sd0l0NGsxd3kxYStadWxpdzBscU5PVmliK3FoU055QmN6UwpnV0VLL3VjTkVkL2ZBNnRBcDFpMndDWHNSTDZaQmNnaFVGOGZFRmFWR0Z0UEdFS2xYbTlLK3RvVC9YcG1mMVU0CnlUUW93UkpPZ2xYK3pxSU56U21oemNhbDVwUURENU9vaEpid2RwdlF3S3RLODRqcG45U2ZKRlFwVm5TNzdkeTUKYmNTUmx5aVFUSXkyYzNWa0lrZFU3YUd0YlNuOEkvamR2U3BKb1FJREFRQUJBb0lCQUE0RHAvallRSEhvRWxnZwpnckN0eWJyVkIxcjdUYm9IQk1ZVkx5NjRXdHZ1aGoxQWFKMlRMY3lWbUVWTWR0aUkrcHBOMUtUZUQxSUZveHZTCjFZczhmcHRjL2x1TkR0UmZRVmVtSHJFeGxlRzRZZTd5VkdXOW1PTktoTnpWdW9XRzVmQWJNTWVpSzBLUDgwRlIKcjNpK1BhN3Bycmw4dENtdlZZNm1FS3RWeGJlZndaekw5NWQ4cWMreGxCWTZnQ3JRREQwVFRHbHY2c29uOGRBQgpIUmh1czJOeVdoS2lWNDRvYVlIUzZQMnNzbENqZHM5aU9tZGxiRGk4VTBZVTJTVjF3bnVuWXVUV3ArZzRVUGNTCjFxbElON3V2dkt0RGtFZUpQZld3OXBvVlNBQ09HTFE1emszMTE1VEJOeG5zd2I5aVJSaXg4V2J2NnU3R0szRm8KZkJ6YUhCa0NnWUVBek9vbXBsS1BzbjJHVFNVSlZ1aERBbGFnS3ljWlc1RjlvR1paQ21IWmNSK0Q3NWhZSUt5bQpreElGL3FjM3NtRXFHaytQRUZkQ2dENlZ3YS92Ym5PUHJxTjZ2TVBwaTFsUGJEZDRDTnp2RjVUM1pOOVpuUXhFCnRlQ3k3SmRiUGE4bklSV3diMFdUdTIvM1YrbTIyU293RW9QZzd3UnY1b3NMLzVtRXJkaTVrd01DZ1lFQXlQTTYKWUFqM2ZGczFBSDZGWFhUTHVoaHVPL0M2bVdVSDhiaC96dHdQVTZPaGg4YW0wTWtMWFJJRTdJa3QvbUQydnFkWAo5Mm40cTE4NUI3ekpEWnRwaCtYbWpiRks2YVRYSkYzUGdnOHVrbEZOUDdnUHkxckhmMkpJM25EU2N3NzBQV1dPCkxycXpVcXlVMk5iZkJtL3FwTWxreDV2WGs5VGkrY2V6SjZ3MWZZc0NnWUVBblF0N01ZOG02Q0hCc3hBR2QxK3MKTTNlV2Exd2MyQXZLSHdzeGhINTNlbll2MUltS1NRaG81V0F6emZYei9yYnU0SmxHeUx4SGtDZm0rRGxSSS9KWApGUDNmakJvTnZDbG5jRVdXcmh3ODlYTEtvN2wxWWFKTWFxUGFITzduRTRCUVdjL1lsMzdZZVJlWGxmM2RqOVFBClljQkd2SWhIQm9aY1hMa1ljWEFlMFdjQ2dZRUF2YW5VVHhBbmhwZ1BUb2djR0RsRWtFQyt5cFJMaXJ4dzR4VGcKakwzbk9Xb1g2Njd6Zzl3OWZYcWtheHlPN0tsWU9scDRERzFZNTJiWXpSVENMcldneFVPWWQrUCtjTWZFeCs1RApETjZIZGxLR05yM2pLbXhrSHpjRXorNXNzMnhKMmlrZ0tick54aHBkNHN2cmRJZnRUOXRGNDZDVHVGUVdadXExCmpaeGNZRUVDZ1lCb0VDRExKRnlpOEVKR01tRG5rUUl0eThxcjZuM1gyenRZa1MwUDFXRUk2eGlvR09oVWhlMFMKNlp5eTN1N3BaV3hpQVhhYnZSSDl2QjJENGJnQXprbWZLMG51VUdSMDJMMXlERXEyM1FNY0NlYVlVM2MrZG9vbAp1b2J5SC9henlMdVVIalNZc3dKa01mbmN0TWFydEdRY2RUKys0RGQ2WlFMWE5zTmUvOGtRNUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
----
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
@@ -110,6 +62,7 @@ data:
# If you want to run cilium in debug mode change this value to true
debug: "false"
debug-verbose: ""
+ metrics-sampling-interval: "5m"
# The agent can be put into the following three policy enforcement modes
# default, always and never.
# https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
@@ -158,6 +111,9 @@ data:
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
+ # bpf-policy-stats-map-max specifies the maximum number of entries in global
+ # policy stats map
+ bpf-policy-stats-map-max: "65536"
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "65536"
@@ -189,7 +145,7 @@ data:
preallocate-bpf-maps: "false"
# Name of the cluster. Only relevant when building a mesh of clusters.
- cluster-name: default
+ cluster-name: "default"
# Unique ID of the cluster. Must be unique across all conneted clusters and
# in the range of 1 and 255. Only relevant when building a mesh of clusters.
cluster-id: "0"
@@ -208,7 +164,6 @@ data:
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
-
enable-ipv4-masquerade: "true"
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
@@ -223,8 +178,8 @@ data:
auto-direct-node-routes: "false"
direct-routing-skip-unreachable: "false"
- enable-local-redirect-policy: "false"
- enable-runtime-device-detection: "true"
+
+
kube-proxy-replacement: "true"
kube-proxy-replacement-healthz-bind-address: ""
@@ -235,10 +190,8 @@ data:
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
bpf-lb-acceleration: "disabled"
- enable-experimental-lb: "false"
enable-svc-source-range-check: "true"
- enable-l2-neigh-discovery: "true"
- arping-refresh-period: "30s"
+ enable-l2-neigh-discovery: "false"
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
enable-k8s-networkpolicy: "true"
@@ -255,32 +208,7 @@ data:
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
- enable-hubble: "true"
- # UNIX domain socket for Hubble server to listen to.
- hubble-socket-path: "/var/run/cilium/hubble.sock"
- # Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this
- # field is not set.
- hubble-metrics-server: ":9965"
- hubble-metrics-server-enable-tls: "false"
- # A space separated list of metrics to enable. See [0] for available metrics.
- #
- # https://github.com/cilium/hubble/blob/master/Documentation/metrics.md
- hubble-metrics:
- dns:query;ignoreAAAA
- drop
- tcp
- flow
- icmp
- http
- enable-hubble-open-metrics: "false"
- hubble-export-file-max-size-mb: "10"
- hubble-export-file-max-backups: "5"
- # An additional address for Hubble server to listen to (e.g. ":4244").
- hubble-listen-address: ":4244"
- hubble-disable-tls: "false"
- hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
- hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
- hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
+ enable-hubble: "false"
ipam: "kubernetes"
ipam-cilium-node-update-rate: "15s"
@@ -294,7 +222,8 @@ data:
procfs: "/host/proc"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/run/cilium/cgroupv2"
- enable-k8s-terminating-endpoint: "true"
+
+ identity-management-mode: "agent"
enable-sctp: "false"
remove-cilium-node-taints: "true"
set-cilium-node-taints: "true"
@@ -309,6 +238,7 @@ data:
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-proxy-response-max-delay: "100ms"
+ tofqdns-preallocate-identities: "true"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
mesh-auth-enabled: "true"
@@ -333,6 +263,7 @@ data:
max-connected-clusters: "255"
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
+ policy-default-local-cluster: "false"
nat-map-stats-entries: "32"
nat-map-stats-interval: "30s"
@@ -353,38 +284,7 @@ metadata:
data:
# Keep the key name as bootstrap-config.json to avoid breaking changes
bootstrap-config.json: |
- {"admin":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}},"applicationLogConfig":{"logFormat":{"textFormat":"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"}},"bootstrapExtensions":[{"name":"envoy.bootstrap.internal_listener","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"}}],"dynamicResources":{"cdsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"},"ldsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"}},"node":{"cluster":"ingress-cluster","id":"host~127.0.0.1~no-id~localdomain"},"overloadManager":{"resourceMonitors":[{"name":"envoy.resource_monitors.global_downstream_max_connections","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig","max_active_downstream_connections":"50000"}}]},"staticResources":{"clusters":[{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfig":{}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"xds-grpc-cilium","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/xds.sock"}}}}]}]},"name":"xds-grpc-cilium","type":"STATIC","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","explicitHttpConfig":{"http2ProtocolOptions":{}}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"/envoy-admin","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}}}]}]},"name":"/envoy-admin","type":"STATIC"}],"listeners":[{"address":{"socketAddress":{"address":"0.0.0.0","portValue":9964}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtualHosts":[{"domains":["*"],"name":"prometheus_metrics_route","routes":[{"match":{"prefix":"/metrics"},"name":"prometheus_metrics_route","route":{"cluster":"/envoy-admin","prefixRewrite":"/stats/prometheus"}}]}]},"statPrefix":"envoy-prometheus-metrics-listener","streamIdleTimeout":"0s"}}]}],"name":"envoy-prometheus-metrics-listener"},{"address":{"socketAddress":{"address":"127.0.0.1","portValue":9878}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtual_hosts":[{"domains":["*"],"name":"health","routes":[{"match":{"prefix":"/healthz"},"name":"health","route":{"cluster":"/envoy-admin","prefixRewrite":"/ready"}}]}]},"statPrefix":"envoy-health-listener","streamIdleTimeout":"0s"}}]}],"name":"envoy-health-listener"}]}}
----
-# Source: cilium/templates/hubble-relay/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: hubble-relay-config
- namespace: kube-system
-data:
- config.yaml: |
- cluster-name: default
- peer-service: "hubble-peer.kube-system.svc.cluster.local.:443"
- listen-address: :4245
- gops: true
- gops-port: "9893"
- retry-timeout:
- sort-buffer-len-max:
- sort-buffer-drain-timeout:
- tls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt
- tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key
- tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
-
- disable-server-tls: true
----
-# Source: cilium/templates/hubble-ui/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: hubble-ui-nginx
- namespace: kube-system
-data:
- nginx.conf: "server {\n listen 8081;\n listen [::]:8081;\n server_name localhost;\n root /app;\n index index.html;\n client_max_body_size 1G;\n\n location / {\n proxy_set_header Host $host;\n proxy_set_header X-Real-IP $remote_addr;\n\n location /api {\n proxy_http_version 1.1;\n proxy_pass_request_headers on;\n proxy_pass http://127.0.0.1:8090;\n }\n location / {\n # double `/index.html` is required here \n try_files $uri $uri/ /index.html /index.html;\n }\n\n # Liveness probe\n location /healthz {\n access_log off;\n add_header Content-Type text/plain;\n return 200 'ok';\n }\n }\n}"
+ {"admin":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}},"applicationLogConfig":{"logFormat":{"textFormat":"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"}},"bootstrapExtensions":[{"name":"envoy.bootstrap.internal_listener","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"}}],"dynamicResources":{"cdsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"},"ldsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"}},"node":{"cluster":"ingress-cluster","id":"host~127.0.0.1~no-id~localdomain"},"overloadManager":{"resourceMonitors":[{"name":"envoy.resource_monitors.global_downstream_max_connections","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig","max_active_downstream_connections":"50000"}}]},"staticResources":{"clusters":[{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfig":{}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"xds-grpc-cilium","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/xds.sock"}}}}]}]},"name":"xds-grpc-cilium","type":"STATIC","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","explicitHttpConfig":{"http2ProtocolOptions":{}}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"/envoy-admin","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}}}]}]},"name":"/envoy-admin","type":"STATIC"}],"listeners":[{"address":{"socketAddress":{"address":"0.0.0.0","portValue":9964}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtualHosts":[{"domains":["*"],"name":"prometheus_metrics_route","routes":[{"match":{"prefix":"/metrics"},"name":"prometheus_metrics_route","route":{"cluster":"/envoy-admin","prefixRewrite":"/stats/prometheus"}}]}]},"statPrefix":"envoy-prometheus-metrics-listener","streamIdleTimeout":"300s"}}]}],"name":"envoy-prometheus-metrics-listener"},{"address":{"socketAddress":{"address":"127.0.0.1","portValue":9878}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtual_hosts":[{"domains":["*"],"name":"health","routes":[{"match":{"prefix":"/healthz"},"name":"health","route":{"cluster":"/envoy-admin","prefixRewrite":"/ready"}}]}]},"statPrefix":"envoy-health-listener","streamIdleTimeout":"300s"}}]}],"name":"envoy-health-listener"}]}}
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -689,7 +589,6 @@ rules:
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- - ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
@@ -698,6 +597,7 @@ rules:
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
+ - ciliumgatewayclassconfigs.cilium.io
- apiGroups:
- cilium.io
resources:
@@ -738,53 +638,6 @@ rules:
- get
- update
---
-# Source: cilium/templates/hubble-ui/clusterrole.yaml
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: hubble-ui
- labels:
- app.kubernetes.io/part-of: cilium
-
-rules:
-- apiGroups:
- - networking.k8s.io
- resources:
- - networkpolicies
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - ""
- resources:
- - componentstatuses
- - endpoints
- - namespaces
- - nodes
- - pods
- - services
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - apiextensions.k8s.io
- resources:
- - customresourcedefinitions
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - cilium.io
- resources:
- - "*"
- verbs:
- - get
- - list
- - watch
----
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@@ -817,23 +670,6 @@ subjects:
name: "cilium-operator"
namespace: kube-system
---
-# Source: cilium/templates/hubble-ui/clusterrolebinding.yaml
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: hubble-ui
- labels:
- app.kubernetes.io/part-of: cilium
-
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: hubble-ui
-subjects:
-- kind: ServiceAccount
- name: "hubble-ui"
- namespace: kube-system
----
# Source: cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -857,7 +693,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-tlsinterception-secrets
- namespace: "cilium-secrets"
+ namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
@@ -965,93 +801,6 @@ spec:
protocol: TCP
targetPort: envoy-metrics
---
-# Source: cilium/templates/hubble-relay/service.yaml
-kind: Service
-apiVersion: v1
-metadata:
- name: hubble-relay
- namespace: kube-system
- annotations:
- labels:
- k8s-app: hubble-relay
- app.kubernetes.io/name: hubble-relay
- app.kubernetes.io/part-of: cilium
-
-spec:
- type: "ClusterIP"
- selector:
- k8s-app: hubble-relay
- ports:
- - protocol: TCP
- port: 80
- targetPort: grpc
----
-# Source: cilium/templates/hubble-ui/service.yaml
-kind: Service
-apiVersion: v1
-metadata:
- name: hubble-ui
- namespace: kube-system
- labels:
- k8s-app: hubble-ui
- app.kubernetes.io/name: hubble-ui
- app.kubernetes.io/part-of: cilium
-
-spec:
- type: "ClusterIP"
- selector:
- k8s-app: hubble-ui
- ports:
- - name: http
- port: 80
- targetPort: 8081
----
-# Source: cilium/templates/hubble/metrics-service.yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: hubble-metrics
- namespace: kube-system
- labels:
- k8s-app: hubble
- app.kubernetes.io/name: hubble
- app.kubernetes.io/part-of: cilium
-
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/port: "9965"
-spec:
- clusterIP: None
- type: ClusterIP
- ports:
- - name: hubble-metrics
- port: 9965
- protocol: TCP
- targetPort: hubble-metrics
- selector:
- k8s-app: cilium
----
-# Source: cilium/templates/hubble/peer-service.yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: hubble-peer
- namespace: kube-system
- labels:
- k8s-app: cilium
- app.kubernetes.io/part-of: cilium
- app.kubernetes.io/name: hubble-peer
-
-spec:
- selector:
- k8s-app: cilium
- ports:
- - name: peer-service
- port: 443
- protocol: TCP
- targetPort: 4244
- internalTrafficPolicy: Local
----
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
@@ -1074,7 +823,8 @@ spec:
metadata:
annotations:
# ensure pods roll when configmap updates
- cilium.io/cilium-configmap-checksum: "dbf80beb10035c90aba9491427bac0044f75ac64ce7113faa9d1872fcde14f80"
+ cilium.io/cilium-configmap-checksum: "6fa590cd5240c877e79dffeaba7b2228c85d09c536ff7efdb5c06f01c56247b1"
+ kubectl.kubernetes.io/default-container: cilium-agent
labels:
k8s-app: cilium
app.kubernetes.io/name: cilium-agent
@@ -1083,9 +833,11 @@ spec:
securityContext:
appArmorProfile:
type: Unconfined
+ seccompProfile:
+ type: Unconfined
containers:
- name: cilium-agent
- image: "quay.io/cilium/cilium:v1.17.2@sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+ image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -1100,7 +852,7 @@ spec:
httpHeaders:
- name: "brief"
value: "true"
- failureThreshold: 105
+ failureThreshold: 300
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
@@ -1113,6 +865,8 @@ spec:
httpHeaders:
- name: "brief"
value: "true"
+ - name: "require-k8s-connectivity"
+ value: "false"
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
@@ -1148,6 +902,10 @@ spec:
resourceFieldRef:
resource: limits.memory
divisor: '1'
+ - name: KUBE_CLIENT_BACKOFF_BASE
+ value: "1"
+ - name: KUBE_CLIENT_BACKOFF_DURATION
+ value: "120"
lifecycle:
postStart:
exec:
@@ -1179,15 +937,6 @@ spec:
exec:
command:
- /cni-uninstall.sh
- ports:
- - name: peer-service
- containerPort: 4244
- hostPort: 4244
- protocol: TCP
- - name: hubble-metrics
- containerPort: 9965
- hostPort: 9965
- protocol: TCP
securityContext:
seLinuxOptions:
level: s0
@@ -1244,14 +993,12 @@ spec:
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
- - name: hubble-tls
- mountPath: /var/lib/cilium/tls/hubble
- readOnly: true
- name: tmp
mountPath: /tmp
+
initContainers:
- name: config
- image: "quay.io/cilium/cilium:v1.17.2@sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+ image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9"
imagePullPolicy: IfNotPresent
command:
- cilium-dbg
@@ -1274,7 +1021,7 @@ spec:
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
# We use nsenter command with host's cgroup and mount namespaces enabled.
- name: mount-cgroup
- image: "quay.io/cilium/cilium:v1.17.2@sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+ image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9"
imagePullPolicy: IfNotPresent
env:
- name: CGROUP_ROOT
@@ -1311,7 +1058,7 @@ spec:
drop:
- ALL
- name: apply-sysctl-overwrites
- image: "quay.io/cilium/cilium:v1.17.2@sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+ image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9"
imagePullPolicy: IfNotPresent
env:
- name: BIN_PATH
@@ -1349,7 +1096,7 @@ spec:
# from a privileged container because the mount propagation bidirectional
# only works from privileged containers.
- name: mount-bpf-fs
- image: "quay.io/cilium/cilium:v1.17.2@sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+ image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9"
imagePullPolicy: IfNotPresent
args:
- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
@@ -1365,7 +1112,7 @@ spec:
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: clean-cilium-state
- image: "quay.io/cilium/cilium:v1.17.2@sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+ image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -1412,7 +1159,7 @@ spec:
mountPath: /var/run/cilium # wait-for-kube-proxy
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
- image: "quay.io/cilium/cilium:v1.17.2@sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+ image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9"
imagePullPolicy: IfNotPresent
command:
- "/install-plugin.sh"
@@ -1546,21 +1293,6 @@ spec:
hostPath:
path: /proc/sys/kernel
type: Directory
- - name: hubble-tls
- projected:
- # note: the leading zero means this number is in octal representation: do not remove it
- defaultMode: 0400
- sources:
- - secret:
- name: hubble-server-certs
- optional: true
- items:
- - key: tls.crt
- path: server.crt
- - key: tls.key
- path: server.key
- - key: ca.crt
- path: client-ca.crt
---
# Source: cilium/templates/cilium-envoy/daemonset.yaml
apiVersion: apps/v1
@@ -1595,7 +1327,7 @@ spec:
type: Unconfined
containers:
- name: cilium-envoy
- image: "quay.io/cilium/cilium-envoy:v1.31.5-1741765102-efed3defcc70ab5b263a0fc44c93d316b846a211@sha256:377c78c13d2731f3720f931721ee309159e782d882251709cb0fac3b42c03f4b"
+ image: "quay.io/cilium/cilium-envoy:v1.34.4-1754895458-68cffdfa568b6b226d70a7ef81fc65dda3b890bf@sha256:247e908700012f7ef56f75908f8c965215c26a27762f296068645eb55450bda2"
imagePullPolicy: IfNotPresent
command:
- /usr/bin/cilium-envoy-starter
@@ -1743,7 +1475,7 @@ metadata:
spec:
# See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
# for more details.
- replicas: 2
+ replicas: 1
selector:
matchLabels:
io.cilium/app: operator
@@ -1755,13 +1487,13 @@ spec:
strategy:
rollingUpdate:
maxSurge: 25%
- maxUnavailable: 50%
+ maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
annotations:
# ensure pods roll when configmap updates
- cilium.io/cilium-configmap-checksum: "dbf80beb10035c90aba9491427bac0044f75ac64ce7113faa9d1872fcde14f80"
+ cilium.io/cilium-configmap-checksum: "6fa590cd5240c877e79dffeaba7b2228c85d09c536ff7efdb5c06f01c56247b1"
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
labels:
@@ -1770,9 +1502,12 @@ spec:
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
+ securityContext:
+ seccompProfile:
+ type: RuntimeDefault
containers:
- name: cilium-operator
- image: "quay.io/cilium/operator-generic:v1.17.2@sha256:81f2d7198366e8dec2903a3a8361e4c68d47d19c68a0d42f0b7b6e3f0523f249"
+ image: "quay.io/cilium/operator-generic:v1.18.1@sha256:97f4553afa443465bdfbc1cc4927c93f16ac5d78e4dd2706736e7395382201bc"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -1824,6 +1559,11 @@ spec:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
terminationMessagePolicy: FallbackToLogsOnError
hostNetwork: true
restartPolicy: Always
@@ -1842,217 +1582,26 @@ spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
- - operator: Exists
+ - effect: NoSchedule
+ key: node.cluster.x-k8s.io/uninitialized
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - effect: NoSchedule
+ key: node.kubernetes.io/not-ready
+ - key: node.cilium.io/agent-not-ready
+ operator: Exists
+
volumes:
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config
----
-# Source: cilium/templates/hubble-relay/deployment.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: hubble-relay
- namespace: kube-system
- labels:
- k8s-app: hubble-relay
- app.kubernetes.io/name: hubble-relay
- app.kubernetes.io/part-of: cilium
-
-spec:
- replicas: 1
- selector:
- matchLabels:
- k8s-app: hubble-relay
- strategy:
- rollingUpdate:
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- annotations:
- # ensure pods roll when configmap updates
- cilium.io/hubble-relay-configmap-checksum: "0aebee6bdee393dd840ea0e068f2efeae387cc07114bb26becb030f0ab1e2397"
- labels:
- k8s-app: hubble-relay
- app.kubernetes.io/name: hubble-relay
- app.kubernetes.io/part-of: cilium
- spec:
- securityContext:
- fsGroup: 65532
- containers:
- - name: hubble-relay
- securityContext:
- capabilities:
- drop:
- - ALL
- runAsGroup: 65532
- runAsNonRoot: true
- runAsUser: 65532
- image: "quay.io/cilium/hubble-relay:v1.17.2@sha256:42a8db5c256c516cacb5b8937c321b2373ad7a6b0a1e5a5120d5028433d586cc"
- imagePullPolicy: IfNotPresent
- command:
- - hubble-relay
- args:
- - serve
- ports:
- - name: grpc
- containerPort: 4245
- readinessProbe:
- grpc:
- port: 4222
- timeoutSeconds: 3
- # livenessProbe will kill the pod, we should be very conservative
- # here on failures since killing the pod should be a last resort, and
- # we should provide enough time for relay to retry before killing it.
- livenessProbe:
- grpc:
- port: 4222
- timeoutSeconds: 10
- # Give relay time to establish connections and make a few retries
- # before starting livenessProbes.
- initialDelaySeconds: 10
- # 10 second * 12 failures = 2 minutes of failure.
- # If relay cannot become healthy after 2 minutes, then killing it
- # might resolve whatever issue is occurring.
- #
- # 10 seconds is a reasonable retry period so we can see if it's
- # failing regularly or only sporadically.
- periodSeconds: 10
- failureThreshold: 12
- startupProbe:
- grpc:
- port: 4222
- # Give relay time to get it's certs and establish connections and
- # make a few retries before starting startupProbes.
- initialDelaySeconds: 10
- # 20 * 3 seconds = 1 minute of failure before we consider startup as failed.
- failureThreshold: 20
- # Retry more frequently at startup so that it can be considered started more quickly.
- periodSeconds: 3
- volumeMounts:
- - name: config
- mountPath: /etc/hubble-relay
- readOnly: true
- - name: tls
- mountPath: /var/lib/hubble-relay/tls
- readOnly: true
- terminationMessagePolicy: FallbackToLogsOnError
-
- restartPolicy: Always
- priorityClassName:
- serviceAccountName: "hubble-relay"
- automountServiceAccountToken: false
- terminationGracePeriodSeconds: 1
- affinity:
- podAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchLabels:
- k8s-app: cilium
- topologyKey: kubernetes.io/hostname
- nodeSelector:
- kubernetes.io/os: linux
- volumes:
- - name: config
- configMap:
- name: hubble-relay-config
- items:
- - key: config.yaml
- path: config.yaml
- - name: tls
- projected:
- # note: the leading zero means this number is in octal representation: do not remove it
- defaultMode: 0400
- sources:
- - secret:
- name: hubble-relay-client-certs
- items:
- - key: tls.crt
- path: client.crt
- - key: tls.key
- path: client.key
- - key: ca.crt
- path: hubble-server-ca.crt
----
-# Source: cilium/templates/hubble-ui/deployment.yaml
-kind: Deployment
-apiVersion: apps/v1
-metadata:
- name: hubble-ui
- namespace: kube-system
- labels:
- k8s-app: hubble-ui
- app.kubernetes.io/name: hubble-ui
- app.kubernetes.io/part-of: cilium
-spec:
- replicas: 1
- selector:
- matchLabels:
- k8s-app: hubble-ui
- strategy:
- rollingUpdate:
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- annotations:
- # ensure pods roll when configmap updates
- cilium.io/hubble-ui-nginx-configmap-checksum: "de069d2597e16e4de004ce684b15d74b2ab6051c717ae073d86199a76d91fcf1"
- labels:
- k8s-app: hubble-ui
- app.kubernetes.io/name: hubble-ui
- app.kubernetes.io/part-of: cilium
- spec:
- securityContext:
- fsGroup: 1001
- runAsGroup: 1001
- runAsUser: 1001
- priorityClassName:
- serviceAccountName: "hubble-ui"
- automountServiceAccountToken: true
- containers:
- - name: frontend
- image: "quay.io/cilium/hubble-ui:v0.13.2@sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392"
- imagePullPolicy: IfNotPresent
- ports:
- - name: http
- containerPort: 8081
- livenessProbe:
- httpGet:
- path: /healthz
- port: 8081
- readinessProbe:
- httpGet:
- path: /
- port: 8081
- volumeMounts:
- - name: hubble-ui-nginx-conf
- mountPath: /etc/nginx/conf.d/default.conf
- subPath: nginx.conf
- - name: tmp-dir
- mountPath: /tmp
- terminationMessagePolicy: FallbackToLogsOnError
- - name: backend
- image: "quay.io/cilium/hubble-ui-backend:v0.13.2@sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15"
- imagePullPolicy: IfNotPresent
- env:
- - name: EVENTS_SERVER_PORT
- value: "8090"
- - name: FLOWS_API_ADDR
- value: "hubble-relay:80"
- ports:
- - name: grpc
- containerPort: 8090
- volumeMounts:
- terminationMessagePolicy: FallbackToLogsOnError
- nodeSelector:
- kubernetes.io/os: linux
- volumes:
- - configMap:
- defaultMode: 420
- name: hubble-ui-nginx
- name: hubble-ui-nginx-conf
- - emptyDir: {}
- name: tmp-dir
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index b42aa6744..b468818cc 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -30,9 +30,11 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
restclient "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
@@ -205,25 +207,25 @@ func createClusterctlLocalRepository(ctx context.Context, config *clusterctl.E2E
// Ensuring a CCM file is defined in the config and register a FileTransformation to inject the referenced file as in place of the CCM_RESOURCES envSubst variable.
Expect(config.Variables).To(HaveKey(CiliumPath), "Missing %s variable in the config", CiliumPath)
- ciliumPath := config.GetVariable(CiliumPath)
+ ciliumPath := config.GetVariableOrEmpty(CiliumPath)
Expect(ciliumPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", CiliumPath)
createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(ciliumPath, CiliumResources)
// Ensuring a CCM file is defined in the config and register a FileTransformation to inject the referenced file as in place of the CCM_RESOURCES envSubst variable.
Expect(config.Variables).To(HaveKey(CCMPath), "Missing %s variable in the config", CCMPath)
- ccmPath := config.GetVariable(CCMPath)
+ ccmPath := config.GetVariableOrEmpty(CCMPath)
Expect(ccmPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", CCMPath)
createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(ccmPath, CCMResources)
// Ensuring a CCM file is defined for clusters with networks in the config and register a FileTransformation to inject the referenced file as in place of the CCM_RESOURCES envSubst variable.
Expect(config.Variables).To(HaveKey(CCMNetworkPath), "Missing %s variable in the config", CCMNetworkPath)
- ccmNetworkPath := config.GetVariable(CCMNetworkPath)
+ ccmNetworkPath := config.GetVariableOrEmpty(CCMNetworkPath)
Expect(ccmNetworkPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", CCMNetworkPath)
createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(ccmNetworkPath, CCMNetworkResources)
// Ensuring a CCM file is defined for clusters with networks in the config and register a FileTransformation to inject the referenced file as in place of the CCM_RESOURCES envSubst variable.
Expect(config.Variables).To(HaveKey(CCMHetznerPath), "Missing %s variable in the config", CCMHetznerPath)
- ccmHetznerPath := config.GetVariable(CCMHetznerPath)
+ ccmHetznerPath := config.GetVariableOrEmpty(CCMHetznerPath)
Expect(ccmHetznerPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", CCMHetznerPath)
createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(ccmHetznerPath, CCMHetznerResources)
@@ -254,6 +256,7 @@ func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme,
return clusterProvider, clusterProxy
}
+// logStatusContinuously does log the state of the mgt-cluster and the wl-clusters continuously.
func logStatusContinuously(ctx context.Context, restConfig *restclient.Config, c client.Client) {
for {
select {
@@ -269,32 +272,86 @@ func logStatusContinuously(ctx context.Context, restConfig *restclient.Config, c
}
}
+// logStatus logs the current state of the mgt-cluster and the wl-clusters once.
+// It gets called again and again by logStatusContinuously.
func logStatus(ctx context.Context, restConfig *restclient.Config, c client.Client) error {
- log("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ <<< Start logging status")
+ log("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
+ log(fmt.Sprintf("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ %s <<< Start logging status", time.Now().Format("2006-01-02 15:04:05")))
if err := logCaphDeployment(ctx, c); err != nil {
return err
}
+
if err := logBareMetalHostStatus(ctx, c); err != nil {
return err
}
+
if err := logHCloudMachineStatus(ctx, c); err != nil {
return err
}
- if err := logConditions(ctx, restConfig); err != nil {
+
+ // Log the unhealthy conditions of the mgt-cluster
+ if err := logConditions(ctx, "mgt-cluster", restConfig); err != nil {
return err
}
- log("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ End logging status >>>")
+
+ // log the unhealthy conditions of the wl-clusters.
+ clusterList := &clusterv1.ClusterList{}
+ err := c.List(ctx, clusterList)
+ if err != nil {
+ return fmt.Errorf("failed to list clusters: %w", err)
+ }
+
+ for _, cluster := range clusterList.Items {
+ // get the secret containing the kubeconfig.
+ secretName := cluster.Name + "-kubeconfig"
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secretName,
+ Namespace: cluster.Namespace,
+ },
+ }
+
+ err := c.Get(ctx, client.ObjectKeyFromObject(secret), secret)
+ if err != nil {
+ log(fmt.Sprintf("Failed to get Secret %s/%s: %v", cluster.Namespace, secretName, err))
+ continue
+ }
+
+ data := secret.Data["value"]
+ if len(data) == 0 {
+ log(fmt.Sprintf("Failed to get Secret %s/%s: content is empty", cluster.Namespace, secretName))
+ continue
+ }
+
+ // create restConfig from kubeconfig.
+ restConfig, err := clientcmd.RESTConfigFromKubeConfig(data)
+ if err != nil {
+ log(fmt.Sprintf("Failed to create REST config from Secret %s/%s: %v", cluster.Namespace, secretName, err))
+ continue
+ }
+
+ // log the conditions of this wl-cluster
+ err = logConditions(ctx, "wl-cluster "+cluster.Name, restConfig)
+ if err != nil {
+ log(fmt.Sprintf("Failed to log Conditions %s/%s: %v", cluster.Namespace, secretName, err))
+ continue
+ }
+ }
+
+ log(fmt.Sprintf("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ %s End logging status >>>", time.Now().Format("2006-01-02 15:04:05")))
return nil
}
-func logConditions(ctx context.Context, restConfig *restclient.Config) error {
- counter, err := checkconditions.RunAndGetCounter(ctx, restConfig, checkconditions.Arguments{})
+func logConditions(ctx context.Context, clusterName string, restConfig *restclient.Config) error {
+ restConfig.QPS = -1 // Since Kubernetes 1.29 "API Priority and Fairness" handles that.
+ counter, err := checkconditions.RunAndGetCounter(ctx, restConfig, &checkconditions.Arguments{})
if err != nil {
- return fmt.Errorf("failed to get check conditions: %w", err)
+ return fmt.Errorf("check conditions: %w", err)
}
- log(fmt.Sprintf("--------------------------------------------------- Unhealthy Conditions: %d",
+ log(fmt.Sprintf("----------------------------------------------- %s ---- Unhealthy Conditions: %d",
+ clusterName,
len(counter.Lines)))
for _, line := range counter.Lines {
@@ -416,13 +473,18 @@ func logBareMetalHostStatus(ctx context.Context, c client.Client) error {
if hbmh.Spec.Status.ProvisioningState == "" {
continue
}
- log("BareMetalHost: " + hbmh.Name + " " + fmt.Sprint(hbmh.Spec.ServerID))
- log(" ProvisioningState: " + string(hbmh.Spec.Status.ProvisioningState))
+
+ // log infos about that hbmh.
+ log("BareMetalHost: " + hbmh.Name + " " + fmt.Sprint(hbmh.Spec.ServerID) +
+ " | IPv4: " + hbmh.Spec.Status.IPv4)
+
+ // Show an Error, if set.
eMsg := string(hbmh.Spec.Status.ErrorType) + " " + hbmh.Spec.Status.ErrorMessage
eMsg = strings.TrimSpace(eMsg)
if eMsg != "" {
log(" Error: " + eMsg)
}
+
readyC := conditions.Get(hbmh, clusterv1.ReadyCondition)
msg := ""
reason := ""
@@ -432,7 +494,7 @@ func logBareMetalHostStatus(ctx context.Context, c client.Client) error {
reason = readyC.Reason
state = string(readyC.Status)
}
- log(" Ready Condition: " + state + " " + reason + " " + msg)
+ log(" ProvisioningState: " + string(hbmh.Spec.Status.ProvisioningState) + " | Ready Condition: " + state + " " + reason + " " + msg)
}
return nil
}
diff --git a/test/e2e/upgrade_caph_controller.go b/test/e2e/upgrade_caph_controller.go
index d1b2e425a..93e24e0e0 100644
--- a/test/e2e/upgrade_caph_controller.go
+++ b/test/e2e/upgrade_caph_controller.go
@@ -104,14 +104,14 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
if input.InitWithBinary == "" {
gomega.Expect(input.E2EConfig.Variables).To(gomega.HaveKey(initWithBinaryVariableName), "Invalid argument. %s variable must be defined when calling %s spec", initWithBinaryVariableName, specName)
gomega.Expect(input.E2EConfig.Variables[initWithBinaryVariableName]).ToNot(gomega.BeEmpty(), "Invalid argument. %s variable can't be empty when calling %s spec", initWithBinaryVariableName, specName)
- clusterctlBinaryURLTemplate = input.E2EConfig.GetVariable(initWithBinaryVariableName)
+ clusterctlBinaryURLTemplate = input.E2EConfig.GetVariableOrEmpty(initWithBinaryVariableName)
} else {
clusterctlBinaryURLTemplate = input.InitWithBinary
}
if input.InitWithInfrastructureProviderVersion == "" {
gomega.Expect(input.E2EConfig.Variables).To(gomega.HaveKey(initWithInfrastructureProviderVersion), "Invalid argument. %s variable must be defined when calling %s spec", initWithBinaryVariableName, specName)
gomega.Expect(input.E2EConfig.Variables[initWithInfrastructureProviderVersion]).ToNot(gomega.BeEmpty(), "Invalid argument. %s variable can't be empty when calling %s spec", initWithBinaryVariableName, specName)
- desiredInfrastructureVersion = input.E2EConfig.GetVariable(initWithInfrastructureProviderVersion)
+ desiredInfrastructureVersion = input.E2EConfig.GetVariableOrEmpty(initWithInfrastructureProviderVersion)
} else {
desiredInfrastructureVersion = input.InitWithInfrastructureProviderVersion
}
@@ -142,7 +142,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
Flavor: input.MgmtFlavor,
Namespace: managementClusterNamespace.Name,
ClusterName: managementClusterName,
- KubernetesVersion: input.E2EConfig.GetVariable(initWithKubernetesVersion),
+ KubernetesVersion: input.E2EConfig.GetVariableOrEmpty(initWithKubernetesVersion),
ControlPlaneMachineCount: ptr.To[int64](1),
WorkerMachineCount: ptr.To[int64](1),
},
@@ -185,7 +185,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
// variable can be used to select versions with a specific contract.
contract := "*"
if input.E2EConfig.HasVariable(initWithProvidersContract) {
- contract = input.E2EConfig.GetVariable(initWithProvidersContract)
+ contract = input.E2EConfig.GetVariableOrEmpty(initWithProvidersContract)
}
if input.InitWithProvidersContract != "" {
contract = input.InitWithProvidersContract
@@ -224,7 +224,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
// so we are getting a template using the downloaded version of clusterctl, applying it, and wait for machines to be provisioned.
workLoadClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
- kubernetesVersion := input.E2EConfig.GetVariable(KubernetesVersion)
+ kubernetesVersion := input.E2EConfig.GetVariableOrEmpty(KubernetesVersion)
controlPlaneMachineCount := ptr.To[int64](1)
workerMachineCount := ptr.To[int64](1)
@@ -318,9 +318,11 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
managementClusterProxy.CollectWorkloadClusterLogs(ctx, testNamespace.Name, managementClusterName, filepath.Join(input.ArtifactFolder, "clusters", managementClusterName, "machines"))
framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
- Lister: managementClusterProxy.GetClient(),
- Namespace: testNamespace.Name,
- LogPath: filepath.Join(input.ArtifactFolder, "clusters", managementClusterResources.Cluster.Name, "resources"),
+ Lister: managementClusterProxy.GetClient(),
+ Namespace: testNamespace.Name,
+ LogPath: filepath.Join(input.ArtifactFolder, "clusters", managementClusterResources.Cluster.Name, "resources"),
+ KubeConfigPath: managementClusterProxy.GetKubeconfigPath(),
+ ClusterctlConfigPath: input.ClusterctlConfigPath,
})
if !input.SkipCleanup {
@@ -328,8 +330,9 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
case discovery.ServerSupportsVersion(managementClusterProxy.GetClientSet().DiscoveryClient, clusterv1.GroupVersion) == nil:
Byf("Deleting all %s clusters in namespace %s in management cluster %s", clusterv1.GroupVersion, testNamespace.Name, managementClusterName)
framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
- Client: managementClusterProxy.GetClient(),
- Namespace: testNamespace.Name,
+ ClusterProxy: managementClusterProxy,
+ Namespace: testNamespace.Name,
+ ClusterctlConfigPath: input.ClusterctlConfigPath,
}, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...)
default:
fmt.Fprintf(ginkgo.GinkgoWriter, "Management Cluster does not appear to support CAPI resources.")
@@ -337,8 +340,9 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
Byf("Deleting cluster %s/%s", testNamespace.Name, managementClusterName)
framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
- Client: managementClusterProxy.GetClient(),
- Namespace: testNamespace.Name,
+ ClusterProxy: managementClusterProxy,
+ Namespace: testNamespace.Name,
+ ClusterctlConfigPath: input.ClusterctlConfigPath,
}, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...)
Byf("Deleting namespace %s used for hosting the %q test", testNamespace.Name, specName)
@@ -358,7 +362,9 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
}
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup,
+ input.BootstrapClusterProxy.GetKubeconfigPath(),
+ input.ClusterctlConfigPath)
})
}
diff --git a/test/helpers/defaults.go b/test/helpers/defaults.go
index 622a3b8d0..b06f2c0d3 100644
--- a/test/helpers/defaults.go
+++ b/test/helpers/defaults.go
@@ -123,7 +123,7 @@ func WithSSHSpec() HostOpts {
}
// WithSSHSpecInclPorts gives the option to define a host with ssh spec incl. ports.
-func WithSSHSpecInclPorts(portAfterInstallImage, portAfterCloudInit int) HostOpts {
+func WithSSHSpecInclPorts(portAfterInstallImage int) HostOpts {
return func(host *infrav1.HetznerBareMetalHost) {
host.Spec.Status.SSHSpec = &infrav1.SSHSpec{
SecretRef: infrav1.SSHSecretRef{
@@ -135,7 +135,6 @@ func WithSSHSpecInclPorts(portAfterInstallImage, portAfterCloudInit int) HostOpt
},
},
PortAfterInstallImage: portAfterInstallImage,
- PortAfterCloudInit: portAfterCloudInit,
}
}
}
diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go
index 49d556cbe..17015fcee 100644
--- a/test/helpers/envtest.go
+++ b/test/helpers/envtest.go
@@ -47,6 +47,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
+ metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
infrav1 "github.com/syself/cluster-api-provider-hetzner/api/v1beta1"
@@ -58,6 +59,7 @@ import (
sshclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/baremetal/client/ssh"
hcloudclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client"
fakeclient "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/client/fake"
+ "github.com/syself/cluster-api-provider-hetzner/pkg/services/hcloud/mockedsshclient"
)
func init() {
@@ -104,23 +106,22 @@ func init() {
}
}
-type (
- // TestEnvironment encapsulates a Kubernetes local test environment.
- TestEnvironment struct {
- ctrl.Manager
- client.Client
- Config *rest.Config
- HCloudClientFactory hcloudclient.Factory
- RobotClientFactory robotclient.Factory
- SSHClientFactory sshclient.Factory
- RescueSSHClient *sshmock.Client
- OSSSHClientAfterInstallImage *sshmock.Client
- OSSSHClientAfterCloudInit *sshmock.Client
- RobotClient *robotmock.Client
- cancel context.CancelFunc
- RateLimitWaitTime time.Duration
- }
-)
+// TestEnvironment encapsulates a Kubernetes local test environment.
+type TestEnvironment struct {
+ ctrl.Manager
+ client.Client
+ Config *rest.Config
+ HCloudClientFactory hcloudclient.Factory
+ RobotClientFactory robotclient.Factory
+ BaremetalSSHClientFactory sshclient.Factory
+ HCloudSSHClientFactory sshclient.Factory
+ RescueSSHClient *sshmock.Client
+ OSSSHClientAfterInstallImage *sshmock.Client
+ OSSSHClientAfterCloudInit *sshmock.Client
+ RobotClient *robotmock.Client
+ cancel context.CancelFunc
+ RateLimitWaitTime time.Duration
+}
// NewTestEnvironment creates a new environment spinning up a local api-server.
func NewTestEnvironment() *TestEnvironment {
@@ -145,6 +146,10 @@ func NewTestEnvironment() *TestEnvironment {
Host: "localhost",
},
),
+ Metrics: metricsserver.Options{
+ // Disable MetricsServer, so that two tests processes can run concurrently
+ BindAddress: "0",
+ },
})
if err != nil {
klog.Fatalf("unable to create manager: %s", err)
@@ -161,16 +166,16 @@ func NewTestEnvironment() *TestEnvironment {
if err := (&infrav1.HCloudMachine{}).SetupWebhookWithManager(mgr); err != nil {
klog.Fatalf("failed to set up webhook with manager for HCloudMachine: %s", err)
}
- if err := (&infrav1.HCloudMachineTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
+ if err := (&infrav1.HCloudMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
klog.Fatalf("failed to set up webhook with manager for HCloudMachineTemplate: %s", err)
}
if err := (&infrav1.HetznerBareMetalMachine{}).SetupWebhookWithManager(mgr); err != nil {
klog.Fatalf("failed to set up webhook with manager for HetznerBareMetalMachine: %s", err)
}
- if err := (&infrav1.HetznerBareMetalMachineTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
+ if err := (&infrav1.HetznerBareMetalMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
klog.Fatalf("failed to set up webhook with manager for HetznerBareMetalMachineTemplate: %s", err)
}
- if err := (&infrav1.HetznerBareMetalHostWebhook{}).SetupWebhookWithManager(mgr); err != nil {
+ if err := (&infrav1.HetznerBareMetalHost{}).SetupWebhookWithManager(mgr); err != nil {
klog.Fatalf("failed to set up webhook with manager for HetznerBareMetalHost: %s", err)
}
if err := (&infrav1.HetznerBareMetalRemediation{}).SetupWebhookWithManager(mgr); err != nil {
@@ -194,12 +199,15 @@ func NewTestEnvironment() *TestEnvironment {
robotClient := &robotmock.Client{}
+ hcloudSSHClient := &sshmock.Client{}
+
return &TestEnvironment{
Manager: mgr,
Client: mgr.GetClient(),
Config: mgr.GetConfig(),
HCloudClientFactory: hcloudClientFactory,
- SSHClientFactory: mocks.NewSSHFactory(rescueSSHClient, osSSHClientAfterInstallImage, osSSHClientAfterCloudInit),
+ BaremetalSSHClientFactory: mocks.NewSSHFactory(rescueSSHClient, osSSHClientAfterInstallImage, osSSHClientAfterCloudInit),
+ HCloudSSHClientFactory: mockedsshclient.NewSSHFactory(hcloudSSHClient),
RescueSSHClient: rescueSSHClient,
OSSSHClientAfterInstallImage: osSSHClientAfterInstallImage,
OSSSHClientAfterCloudInit: osSSHClientAfterCloudInit,
diff --git a/test/helpers/webhook.go b/test/helpers/webhook.go
index 1046d627b..4461ebb0e 100644
--- a/test/helpers/webhook.go
+++ b/test/helpers/webhook.go
@@ -17,7 +17,6 @@ limitations under the License.
package helpers
import (
- "fmt"
"net"
"os"
"path"
@@ -95,8 +94,14 @@ func initializeWebhookInEnvironment() {
klog.Fatalf("Failed to append core controller webhook config: %v", err)
}
+ // Two tests processes should be able ro run concurrently. Each needs an own port:
+ port, err := getFreePort()
+ if err != nil {
+ klog.Fatalf("Failed to get a free port for webhook: %v", err)
+ }
+
env.WebhookInstallOptions = envtest.WebhookInstallOptions{
- LocalServingPort: 9443,
+ LocalServingPort: port,
LocalServingHost: "localhost",
MaxTime: 20 * time.Second,
PollInterval: time.Second,
@@ -106,23 +111,34 @@ func initializeWebhookInEnvironment() {
}
// WaitForWebhooks waits for webhook port to be ready.
+// WaitForWebhooks will not return until the webhook port is open.
func (t *TestEnvironment) WaitForWebhooks() {
port := env.WebhookInstallOptions.LocalServingPort
-
klog.V(2).Infof("Waiting for webhook port %d to be open prior to running tests", port)
timeout := 1 * time.Second
for {
- time.Sleep(1 * time.Second)
- fmt.Printf("checking port .................... %v\n", port)
+ time.Sleep(timeout)
conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(port)), timeout)
if err != nil {
klog.V(2).Infof("Webhook port is not ready, will retry in %v: %s", timeout, err)
continue
}
- if err := conn.Close(); err != nil {
- klog.Fatalf("failed to close connection: %s", err)
+ err = conn.Close()
+ if err != nil {
+ klog.V(2).Infof("Failed to close connection: %s", err)
+ return
}
klog.V(2).Info("Webhook port is now open. Continuing with tests...")
return
}
}
+
+func getFreePort() (port int, err error) {
+ ln, err := net.Listen("tcp", "[::]:0")
+ if err != nil {
+ return 0, err
+ }
+ port = ln.Addr().(*net.TCPAddr).Port
+ err = ln.Close()
+ return
+}
diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 000000000..26bc914a3
--- /dev/null
+++ b/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.0.1
+# Keep this pinned version in parity with cel-go
diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 000000000..3de1ec213
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 000000000..0d4fed27c
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 000000000..37d8adc95
--- /dev/null
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..59908e2d8
--- /dev/null
+++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 000000000..8f5fd5c31
--- /dev/null
+++ b/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to
+
+
+### Mergo in the wild
+
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
+
+## Install
+
+ go get dario.cat/mergo
+
+ // use in your .go code
+ import (
+ "dario.cat/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+Also, you can merge overwriting values using the transformer `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+}
+```
+
+If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A *string
+ B int64
+}
+
+func main() {
+ first := "first"
+ second := "second"
+ src := Foo{
+ A: &first,
+ B: 2,
+ }
+
+ dest := Foo{
+ A: &second,
+ B: 1,
+ }
+
+ mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
+
+Here is a nice example:
+
+```go
+package main
+
+import (
+ "fmt"
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if test are failing due missing package, please execute:
+
+ go get gopkg.in/yaml.v3
+
+### Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+ "fmt"
+ "dario.cat/mergo"
+ "reflect"
+ "time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+type Snapshot struct {
+ Time time.Time
+ // ...
+}
+
+func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+## Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md
new file mode 100644
index 000000000..a5de61f77
--- /dev/null
+++ b/vendor/dario.cat/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.3.x | :white_check_mark: |
+| < 0.3 | :x: |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go
new file mode 100644
index 000000000..7d96ec054
--- /dev/null
+++ b/vendor/dario.cat/mergo/doc.go
@@ -0,0 +1,148 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+# Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+# Important notes
+
+1.0.0
+
+In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
+
+0.3.9
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+# Install
+
+Do your usual installation procedure:
+
+ go get dario.cat/mergo
+
+ // use in your .go code
+ import (
+ "dario.cat/mergo"
+ )
+
+# Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "dario.cat/mergo"
+ )
+
+ type Foo struct {
+ A string
+ B int64
+ }
+
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+ }
+
+# Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "dario.cat/mergo"
+ "reflect"
+ "time"
+ )
+
+ type timeTransformer struct {
+ }
+
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
+
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+ }
+
+# Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+# About
+
+Written by Dario Castañé: https://da.rio.hn
+
+# License
+
+BSD 3-Clause license, as Go language.
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/dario.cat/mergo/map.go
similarity index 95%
rename from vendor/github.com/imdario/mergo/map.go
rename to vendor/dario.cat/mergo/map.go
index a13a7ee46..759b4f74f 100644
--- a/vendor/github.com/imdario/mergo/map.go
+++ b/vendor/dario.cat/mergo/map.go
@@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
}
// Remember, remember...
- visited[h] = &visit{addr, typ, seen}
+ visited[h] = &visit{typ, seen, addr}
}
zeroValue := reflect.Value{}
switch dst.Kind() {
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
dstMap[fieldName] = src.Field(i).Interface()
}
}
@@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
func _map(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
- return ErrNonPointerAgument
+ return ErrNonPointerArgument
}
var (
vDst, vSrc reflect.Value
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
new file mode 100644
index 000000000..fd47c95b2
--- /dev/null
+++ b/vendor/dario.cat/mergo/merge.go
@@ -0,0 +1,409 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func hasMergeableFields(dst reflect.Value) (exported bool) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ field := dst.Type().Field(i)
+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+ exported = exported || hasMergeableFields(dst.Field(i))
+ } else if isExportedComponent(&field) {
+ exported = exported || len(field.PkgPath) == 0
+ }
+ }
+ return
+}
+
+func isExportedComponent(field *reflect.StructField) bool {
+ pkgPath := field.PkgPath
+ if len(pkgPath) > 0 {
+ return false
+ }
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ return false
+ }
+ return true
+}
+
+type Config struct {
+ Transformers Transformers
+ Overwrite bool
+ ShouldNotDereference bool
+ AppendSlice bool
+ TypeCheck bool
+ overwriteWithEmptyValue bool
+ overwriteSliceWithEmptyValue bool
+ sliceDeepCopy bool
+ debug bool
+}
+
+type Transformers interface {
+ Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ typeCheck := config.TypeCheck
+ overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+ sliceDeepCopy := config.sliceDeepCopy
+
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{typ, seen, addr}
+ }
+
+ if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
+ if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+ err = fn(dst, src)
+ return
+ }
+ }
+
+ switch dst.Kind() {
+ case reflect.Struct:
+ if hasMergeableFields(dst) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ } else {
+ if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
+ dst.Set(src)
+ }
+ }
+ case reflect.Map:
+ if dst.IsNil() && !src.IsNil() {
+ if dst.CanSet() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ } else {
+ dst = src
+ return
+ }
+ }
+
+ if src.Kind() != reflect.Map {
+ if overwrite && dst.CanSet() {
+ dst.Set(src)
+ }
+ return
+ }
+
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ if overwrite {
+ dst.SetMapIndex(key, srcElement)
+ }
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ srcMapElm := srcElement
+ dstMapElm := dstElement
+ if srcMapElm.CanInterface() {
+ srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+ if dstMapElm.IsValid() {
+ dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+ }
+ }
+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+ return
+ }
+ case reflect.Slice:
+ srcSlice := reflect.ValueOf(srcElement.Interface())
+
+ var dstSlice reflect.Value
+ if !dstElement.IsValid() || dstElement.IsNil() {
+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+ } else {
+ dstSlice = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = srcSlice
+ } else if config.AppendSlice {
+ if srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ } else if sliceDeepCopy {
+ i := 0
+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+ srcElement := srcSlice.Index(i)
+ dstElement := dstSlice.Index(i)
+
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+
+ }
+ dst.SetMapIndex(key, dstSlice)
+ }
+ }
+
+ if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+ continue
+ }
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+ continue
+ }
+ }
+
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+
+ // Ensure that all keys in dst are deleted if they are not in src.
+ if overwriteWithEmptySrc {
+ for _, key := range dst.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ dst.SetMapIndex(key, reflect.Value{})
+ }
+ }
+ }
+ case reflect.Slice:
+ if !dst.CanSet() {
+ break
+ }
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+ dst.Set(src)
+ } else if config.AppendSlice {
+ if src.Type() != dst.Type() {
+ return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
+ }
+ dst.Set(reflect.AppendSlice(dst, src))
+ } else if sliceDeepCopy {
+ for i := 0; i < src.Len() && i < dst.Len(); i++ {
+ srcElement := src.Index(i)
+ dstElement := dst.Index(i)
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if isReflectNil(src) {
+ if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+ dst.Set(src)
+ }
+ } else if src.Kind() == reflect.Ptr {
+ if !config.ShouldNotDereference {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else if src.Elem().Kind() != reflect.Struct {
+ if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+ dst.Set(src)
+ }
+ }
+ } else if dst.Elem().Type() == src.Type() {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return ErrDifferentArgumentsTypes
+ }
+ break
+ }
+
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if dst.Elem().Kind() == src.Elem().Kind() {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ break
+ }
+ default:
+ mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
+ if mustSet {
+ if dst.CanSet() {
+ dst.Set(src)
+ } else {
+ dst = src
+ }
+ }
+ }
+
+ return
+}
+
+// Merge will fill any empty for value type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid same-type structs
+// and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will do recursively any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
+func WithTransformers(transformers Transformers) func(*Config) {
+ return func(config *Config) {
+ config.Transformers = transformers
+ }
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
+func WithOverride(config *Config) {
+ config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
+func WithOverwriteWithEmptyValue(config *Config) {
+ config.Overwrite = true
+ config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+ config.overwriteSliceWithEmptyValue = true
+}
+
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+ config.ShouldNotDereference = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting it.
+func WithAppendSlice(config *Config) {
+ config.AppendSlice = true
+}
+
+// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+ config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+ config.sliceDeepCopy = true
+ config.Overwrite = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerArgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
+
+// IsReflectNil is the reflect value provided nil
+func isReflectNil(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return v.IsNil()
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go
similarity index 90%
rename from vendor/github.com/imdario/mergo/mergo.go
rename to vendor/dario.cat/mergo/mergo.go
index 9fe362d47..0a721e2d8 100644
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ b/vendor/dario.cat/mergo/mergo.go
@@ -20,7 +20,7 @@ var (
ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
- ErrNonPointerAgument = errors.New("dst must be a pointer")
+ ErrNonPointerArgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
@@ -28,13 +28,13 @@ var (
// checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct {
- ptr uintptr
typ reflect.Type
next *visit
+ ptr uintptr
}
// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
@@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
if v.IsNil() {
return true
}
- return isEmptyValue(v.Elem())
+ if shouldDereference {
+ return isEmptyValue(v.Elem(), shouldDereference)
+ }
+ return false
case reflect.Func:
return v.IsNil()
case reflect.Invalid:
diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
index c87d1c4b9..fbc633259 100644
--- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml
+++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -5,12 +5,9 @@ linters:
disable-all: true
enable:
- misspell
- - structcheck
- govet
- staticcheck
- - deadcode
- errcheck
- - varcheck
- unparam
- ineffassign
- nakedret
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
index f12626423..f95a504fe 100644
--- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
## 3.2.0 (2022-11-28)
### Added
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
index eac19178f..9ca87a2c7 100644
--- a/vendor/github.com/Masterminds/semver/v3/Makefile
+++ b/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -1,7 +1,5 @@
GOPATH=$(shell go env GOPATH)
GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
-GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build
-GOFUZZ = $(GOPATH)/bin/go-fuzz
.PHONY: lint
lint: $(GOLANGCI_LINT)
@@ -19,19 +17,15 @@ test-cover:
GO111MODULE=on go test -cover .
.PHONY: fuzz
-fuzz: $(GOFUZZBUILD) $(GOFUZZ)
- @echo "==> Fuzz testing"
- $(GOFUZZBUILD)
- $(GOFUZZ) -workdir=_fuzz
+fuzz:
+ @echo "==> Running Fuzz Tests"
+ go env GOCACHE
+ go test -fuzz=FuzzNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzNewConstraint -fuzztime=15s .
$(GOLANGCI_LINT):
# Install golangci-lint. The configuration for it is in the .golangci.yml
# file in the root of the repository
echo ${GOPATH}
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
-
-$(GOFUZZBUILD):
- cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
-
-$(GOFUZZ):
- cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep
\ No newline at end of file
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
index d8f54dcbd..ed5693608 100644
--- a/vendor/github.com/Masterminds/semver/v3/README.md
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -13,23 +13,22 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds
[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
[](https://goreportcard.com/report/github.com/Masterminds/semver)
-If you are looking for a command line tool for version comparisons please see
-[vert](https://github.com/Masterminds/vert) which uses this library.
-
## Package Versions
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
+
There are three major versions fo the `semver` package.
-* 3.x.x is the new stable and active version. This version is focused on constraint
+* 3.x.x is the stable and active version. This version is focused on constraint
compatibility for range handling in other tools from other languages. It has
a similar API to the v1 releases. The development of this version is on the master
branch. The documentation for this version is below.
* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
-* 1.x.x is the most widely used version with numerous tagged releases. This is the
- previous stable and is still maintained for bug fixes. The development, to fix
- bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+ v3 release instead. You can read the documentation for the 1.x.x release
+ [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
## Parsing Semantic Versions
@@ -78,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on
differences to notes between these two methods of comparison.
1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include prereleases
+ and others it will follow the specification and always include pre-releases
within the comparison. It will provide an answer that is valid with the
comparison section of the spec at https://semver.org/#spec-item-11
2. When constraint checking is used for checks or validation it will follow a
different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering prereleases to be invalid if the
+ and Rust/Cargo. This includes considering pre-releases to be invalid if the
ranges does not include one. If you want to have it include pre-releases a
simple solution is to include `-0` in your range.
3. Constraint ranges can have some complex rules including the shorthand use of
@@ -111,7 +110,7 @@ v, err := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parsable.
}
-// Check if the version meets the constraints. The a variable will be true.
+// Check if the version meets the constraints. The variable a will be true.
a := c.Check(v)
```
@@ -135,20 +134,20 @@ The basic comparisons are:
### Working With Prerelease Versions
Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of prereleases include
-development, alpha, beta, and release candidate releases. A prerelease may be
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, prereleases come before their associated releases. In this
+order of precedence, pre-releases come before their associated releases. In this
example `1.2.3-beta.1 < 1.2.3`.
-According to the Semantic Version specification prereleases may not be
+According to the Semantic Version specification, pre-releases may not be
API compliant with their release counterpart. It says,
> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
-SemVer comparisons using constraints without a prerelease comparator will skip
-prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
The reason for the `0` as a pre-release version in the example comparison is
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
@@ -169,6 +168,9 @@ These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
### Wildcards In Comparisons
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
@@ -242,3 +244,15 @@ for _, m := range msgs {
If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://github.com/Masterminds/semver)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 000000000..a30a66b1f
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported |
+| ------- | ------------------ |
+| 3.x | :white_check_mark: |
+| 2.x | :x: |
+| 1.x | :x: |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHubs
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
index 203072e46..8461c7ed9 100644
--- a/vendor/github.com/Masterminds/semver/v3/constraints.go
+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -586,7 +586,7 @@ func rewriteRange(i string) string {
}
o := i
for _, v := range m {
- t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+ t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
o = strings.Replace(o, v[0], t, 1)
}
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
deleted file mode 100644
index a242ad705..000000000
--- a/vendor/github.com/Masterminds/semver/v3/fuzz.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build gofuzz
-
-package semver
-
-func Fuzz(data []byte) int {
- d := string(data)
-
- // Test NewVersion
- _, _ = NewVersion(d)
-
- // Test StrictNewVersion
- _, _ = StrictNewVersion(d)
-
- // Test NewConstraint
- _, _ = NewConstraint(d)
-
- // The return value should be 0 normally, 1 if the priority in future tests
- // should be increased, and -1 if future tests should skip passing in that
- // data. We do not have a reason to change priority so 0 is always returned.
- // There are example tests that do this.
- return 0
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index 7c4bed334..ff499fb66 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -83,22 +83,23 @@ func StrictNewVersion(v string) (*Version, error) {
original: v,
}
- // check for prerelease or build metadata
- var extra []string
- if strings.ContainsAny(parts[2], "-+") {
- // Start with the build metadata first as it needs to be on the right
- extra = strings.SplitN(parts[2], "+", 2)
- if len(extra) > 1 {
- // build metadata found
- sv.metadata = extra[1]
- parts[2] = extra[0]
+ // Extract build metadata
+ if strings.Contains(parts[2], "+") {
+ extra := strings.SplitN(parts[2], "+", 2)
+ sv.metadata = extra[1]
+ parts[2] = extra[0]
+ if err := validateMetadata(sv.metadata); err != nil {
+ return nil, err
}
+ }
- extra = strings.SplitN(parts[2], "-", 2)
- if len(extra) > 1 {
- // prerelease found
- sv.pre = extra[1]
- parts[2] = extra[0]
+ // Extract build prerelease
+ if strings.Contains(parts[2], "-") {
+ extra := strings.SplitN(parts[2], "-", 2)
+ sv.pre = extra[1]
+ parts[2] = extra[0]
+ if err := validatePrerelease(sv.pre); err != nil {
+ return nil, err
}
}
@@ -114,7 +115,7 @@ func StrictNewVersion(v string) (*Version, error) {
}
}
- // Extract the major, minor, and patch elements onto the returned Version
+ // Extract major, minor, and patch
var err error
sv.major, err = strconv.ParseUint(parts[0], 10, 64)
if err != nil {
@@ -131,23 +132,6 @@ func StrictNewVersion(v string) (*Version, error) {
return nil, err
}
- // No prerelease or build metadata found so returning now as a fastpath.
- if sv.pre == "" && sv.metadata == "" {
- return sv, nil
- }
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
return sv, nil
}
@@ -381,15 +365,31 @@ func (v *Version) LessThan(o *Version) bool {
return v.Compare(o) < 0
}
+// LessThanEqual tests if one version is less or equal than another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
// GreaterThan tests if one version is greater than another one.
func (v *Version) GreaterThan(o *Version) bool {
return v.Compare(o) > 0
}
+// GreaterThanEqual tests if one version is greater or equal than another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
// Equal tests if two versions are equal to each other.
// Note, versions can be equal with different metadata since metadata
// is not considered part of the comparable version.
func (v *Version) Equal(o *Version) bool {
+ if v == o {
+ return true
+ }
+ if v == nil || o == nil {
+ return false
+ }
return v.Compare(o) == 0
}
diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
index 2ce45dd4e..b5ef766a7 100644
--- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
@@ -1,5 +1,23 @@
# Changelog
+## Release 3.3.0 (2024-08-29)
+
+### Added
+
+- #400: added sha512sum function (thanks @itzik-elayev)
+
+### Changed
+
+- #407: Removed duplicate documentation (functions were documentated in 2 places)
+- #290: Corrected copy/paster oops in math documentation (thanks @zzhu41)
+- #369: Corrected template reference in docs (thanks @chey)
+- #375: Added link to URL documenation (thanks @carlpett)
+- #406: Updated the mergo dependency which had a breaking change (which was accounted for)
+- #376: Fixed documentation error (thanks @jheyduk)
+- #404: Updated dependency tree
+- #391: Fixed misspelling (thanks @chrishalbert)
+- #405: Updated Go versions used in testing
+
## Release 3.2.3 (2022-11-29)
### Changed
@@ -307,7 +325,7 @@ This release adds new functions, including:
- Added `semver` and `semverCompare` for Semantic Versions
- `list` replaces `tuple`
- Fixed issue with `join`
-- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
## Release 2.9.0 (2017-02-23)
@@ -361,7 +379,7 @@ Because we switched from `int` to `int64` as the return value for all integer ma
- `min` complements `max` (formerly `biggest`)
- `empty` indicates that a value is the empty value for its type
- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
-- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
- Date formatters have been added for HTML dates (as used in `date` input fields)
- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go
index 13a5cd559..75fe027e4 100644
--- a/vendor/github.com/Masterminds/sprig/v3/crypto.go
+++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go
@@ -14,6 +14,7 @@ import (
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
@@ -36,6 +37,11 @@ import (
"golang.org/x/crypto/scrypt"
)
+func sha512sum(input string) string {
+ hash := sha512.Sum512([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
func sha256sum(input string) string {
hash := sha256.Sum256([]byte(input))
return hex.EncodeToString(hash[:])
diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go
index ade889698..4315b3542 100644
--- a/vendor/github.com/Masterminds/sprig/v3/dict.go
+++ b/vendor/github.com/Masterminds/sprig/v3/dict.go
@@ -1,7 +1,7 @@
package sprig
import (
- "github.com/imdario/mergo"
+ "dario.cat/mergo"
"github.com/mitchellh/copystructure"
)
diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go
index aabb9d448..91031d6d1 100644
--- a/vendor/github.com/Masterminds/sprig/v3/doc.go
+++ b/vendor/github.com/Masterminds/sprig/v3/doc.go
@@ -6,7 +6,7 @@ inside of Go `html/template` and `text/template` files.
To add these functions, use the `template.Funcs()` method:
- t := templates.New("foo").Funcs(sprig.FuncMap())
+ t := template.New("foo").Funcs(sprig.FuncMap())
Note that you should add the function map before you parse any template files.
diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go
index 57fcec1d9..cda47d26f 100644
--- a/vendor/github.com/Masterminds/sprig/v3/functions.go
+++ b/vendor/github.com/Masterminds/sprig/v3/functions.go
@@ -22,8 +22,7 @@ import (
//
// Use this to pass the functions into the template engine:
//
-// tpl := template.New("foo").Funcs(sprig.FuncMap()))
-//
+// tpl := template.New("foo").Funcs(sprig.FuncMap()))
func FuncMap() template.FuncMap {
return HtmlFuncMap()
}
@@ -142,10 +141,13 @@ var genericMap = map[string]interface{}{
"swapcase": util.SwapCase,
"shuffle": xstrings.Shuffle,
"snakecase": xstrings.ToSnakeCase,
- "camelcase": xstrings.ToCamelCase,
- "kebabcase": xstrings.ToKebabCase,
- "wrap": func(l int, s string) string { return util.Wrap(s, l) },
- "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
+ // camelcase used to call xstrings.ToCamelCase, but that function had a breaking change in version
+ // 1.5 that moved it from upper camel case to lower camel case. This is a breaking change for sprig.
+ // A new xstrings.ToPascalCase function was added that provided upper camel case.
+ "camelcase": xstrings.ToPascalCase,
+ "kebabcase": xstrings.ToKebabCase,
+ "wrap": func(l int, s string) string { return util.Wrap(s, l) },
+ "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
// Switch order so that "foobar" | contains "foo"
"contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
"hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
@@ -159,6 +161,7 @@ var genericMap = map[string]interface{}{
"plural": plural,
"sha1sum": sha1sum,
"sha256sum": sha256sum,
+ "sha512sum": sha512sum,
"adler32sum": adler32sum,
"toString": strval,
@@ -336,20 +339,20 @@ var genericMap = map[string]interface{}{
"mustChunk": mustChunk,
// Crypto:
- "bcrypt": bcrypt,
- "htpasswd": htpasswd,
- "genPrivateKey": generatePrivateKey,
- "derivePassword": derivePassword,
- "buildCustomCert": buildCustomCertificate,
- "genCA": generateCertificateAuthority,
- "genCAWithKey": generateCertificateAuthorityWithPEMKey,
- "genSelfSignedCert": generateSelfSignedCertificate,
+ "bcrypt": bcrypt,
+ "htpasswd": htpasswd,
+ "genPrivateKey": generatePrivateKey,
+ "derivePassword": derivePassword,
+ "buildCustomCert": buildCustomCertificate,
+ "genCA": generateCertificateAuthority,
+ "genCAWithKey": generateCertificateAuthorityWithPEMKey,
+ "genSelfSignedCert": generateSelfSignedCertificate,
"genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey,
- "genSignedCert": generateSignedCertificate,
- "genSignedCertWithKey": generateSignedCertificateWithPEMKey,
- "encryptAES": encryptAES,
- "decryptAES": decryptAES,
- "randBytes": randBytes,
+ "genSignedCert": generateSignedCertificate,
+ "genSignedCertWithKey": generateSignedCertificateWithPEMKey,
+ "encryptAES": encryptAES,
+ "decryptAES": decryptAES,
+ "randBytes": randBytes,
// UUIDs:
"uuidv4": uuidv4,
diff --git a/vendor/github.com/adrg/xdg/README.md b/vendor/github.com/adrg/xdg/README.md
index 4a452dea1..cf16c5a5e 100644
--- a/vendor/github.com/adrg/xdg/README.md
+++ b/vendor/github.com/adrg/xdg/README.md
@@ -41,8 +41,9 @@ Provides an implementation of the [XDG Base Directory Specification](https://spe
The specification defines a set of standard paths for storing application files,
including data and configuration files. For portability and flexibility reasons,
applications should use the XDG defined locations instead of hardcoding paths.
-The package also includes the locations of well known [user directories](https://wiki.archlinux.org/index.php/XDG_user_directories), as well as
-other common directories such as fonts and applications.
+
+The package also includes the locations of well known [user directories](https://wiki.archlinux.org/index.php/XDG_user_directories),
+support for the non-standard `XDG_BIN_HOME` directory, as well as other common directories such as fonts and applications.
The current implementation supports **most flavors of Unix**, **Windows**, **macOS** and **Plan 9**.
On Windows, where XDG environment variables are not usually set, the package uses [Known Folders](https://docs.microsoft.com/en-us/windows/win32/shell/known-folders)
@@ -70,15 +71,16 @@ Sensible fallback locations are used for the folders which are not set.
-| |
Unix
|macOS
|Plan 9
| -| :------------------------------------------------------------: | :-----------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | -| XDG_DATA_HOME | ~/.local/share | ~/Library/Application Support | $home/lib | -| XDG_DATA_DIRS | /usr/local/shareUnix
|macOS
|Plan 9
| +| :------------------------------------------------------------: | :-----------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | +| XDG_DATA_HOME | ~/.local/share | ~/Library/Application Support | $home/lib | +| XDG_DATA_DIRS | /usr/local/shareKnown Folder(s)
|Fallback(s)
| -| :-----------------------------------------------------------: | :--------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------: | -| Home | Profile | %USERPROFILE% | -| Applications | ProgramsKnown Folder(s)
|Fallback(s)
| +| :-----------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Home | Profile | %USERPROFILE% | +| Applications | Programs-// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format.
-// -//-// line line:charPositionInLine msg -//-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go deleted file mode 100644 index 5c0a637ba..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - InErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //InErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. - // - d.lastErrorIndex = -1 - d.lastErrorStates = nil - return d -} - -//
The default implementation simply calls {@link //endErrorCondition} to -// ensure that the handler is not in error recovery mode.
-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// {@inheritDoc} -// -//The default implementation simply calls {@link //endErrorCondition}.
-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// {@inheritDoc} -// -//The default implementation returns immediately if the handler is already -// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} -// and dispatches the Reporting task based on the runtime type of {@code e} -// according to the following table.
-// -//The default implementation reSynchronizes the parser by consuming tokens -// until we find one in the reSynchronization set--loosely the set of tokens -// that can follow the current rule.
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//Implements Jim Idle's magic Sync mechanism in closures and optional -// subrules. E.g.,
-// -//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-//
-//
-// At the start of a sub rule upon error, {@link //Sync} performs single
-// token deletion, if possible. If it can't do that, it bails on the current
-// rule and uses the default error recovery, which consumes until the
-// reSynchronization set of the current rule.
-//
-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block -// with an empty alternative), then the expected set includes what follows -// the subrule.
-// -//During loop iteration, it consumes until it sees a token that can start a -// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to -// stay in the loop as long as possible.
-// -//ORIGINS
-// -//Previous versions of ANTLR did a poor job of their recovery within loops. -// A single mismatch token or missing token would force the parser to bail -// out of the entire rules surrounding the loop. So, for rule
-// -//
-// classfunc : 'class' ID '{' member* '}'
-//
-//
-// input with an extra token between members would force the parser to
-// consume until it found the next class definition rather than the next
-// member definition of the current class.
-//
-// This functionality cost a little bit of effort because the parser has to -// compare token set at the start of the loop and at each iteration. If for -// some reason speed is suffering for you, you can turn off d -// functionality by simply overriding d method as a blank { }.
-func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.InErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "This method is called when {@link //singleTokenDeletion} identifies -// single-token deletion as a viable recovery strategy for a mismatched -// input error.
-// -//The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//This method is called when {@link //singleTokenInsertion} identifies -// single-token insertion as a viable recovery strategy for a mismatched -// input error.
-// -//The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//The default implementation attempts to recover from the mismatched input -// by using single token insertion and deletion as described below. If the -// recovery attempt fails, d method panics an -// {@link InputMisMatchException}.
-// -//EXTRA TOKEN (single token deletion)
-// -//{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the -// right token, however, then assume {@code LA(1)} is some extra spurious -// token and delete it. Then consume and return the next token (which was -// the {@code LA(2)} token) as the successful result of the Match operation.
-// -//This recovery strategy is implemented by {@link -// //singleTokenDeletion}.
-// -//MISSING TOKEN (single token insertion)
-// -//If current token (at {@code LA(1)}) is consistent with what could come -// after the expected {@code LA(1)} token, then assume the token is missing -// and use the parser's {@link TokenFactory} to create it on the fly. The -// "insertion" is performed by returning the created token as the successful -// result of the Match operation.
-// -//This recovery strategy is implemented by {@link -// //singleTokenInsertion}.
-// -//EXAMPLE
-// -//For example, Input {@code i=(3} is clearly missing the {@code ')'}. When -// the parser returns from the nested call to {@code expr}, it will have -// call chain:
-// -//-// stat &rarr expr &rarr atom -//-// -// and it will be trying to Match the {@code ')'} at d point in the -// derivation: -// -//
-// => ID '=' '(' INT ')' ('+' atom)* ”
-// ^
-//
-//
-// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
-// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
-// is in the set of tokens that can follow the {@code ')'} token reference
-// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
-func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
- // SINGLE TOKEN DELETION
- MatchedSymbol := d.SingleTokenDeletion(recognizer)
- if MatchedSymbol != nil {
- // we have deleted the extra token.
- // now, move past ttype token as if all were ok
- recognizer.Consume()
- return MatchedSymbol
- }
- // SINGLE TOKEN INSERTION
- if d.SingleTokenInsertion(recognizer) {
- return d.GetMissingSymbol(recognizer)
- }
- // even that didn't work must panic the exception
- panic(NewInputMisMatchException(recognizer))
-}
-
-// This method implements the single-token insertion inline error recovery
-// strategy. It is called by {@link //recoverInline} if the single-token
-// deletion strategy fails to recover from the mismatched input. If this
-// method returns {@code true}, {@code recognizer} will be in error recovery
-// mode.
-//
-// This method determines whether or not single-token insertion is viable by -// checking if the {@code LA(1)} input symbol could be successfully Matched -// if it were instead the {@code LA(2)} symbol. If d method returns -// {@code true}, the caller is responsible for creating and inserting a -// token with the correct type to produce d behavior.
-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//If the single-token deletion is successful, d method calls -// {@link //ReportUnwantedToken} to Report the error, followed by -// {@link Parser//consume} to actually "delete" the extraneous token. Then, -// before returning {@link //ReportMatch} is called to signal a successful -// Match.
-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "-// This error strategy is useful in the following scenarios.
-// -//-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - if parent, ok := context.GetParent().(ParserRuleContext); ok { - context = parent - } else { - context = nil - } - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go deleted file mode 100644 index 3954c1378..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. - t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//If the state number is not known, b method returns -1.
- -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. -// -//If the set of expected tokens is not known and could not be computed, -// b method returns {@code nil}.
-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. - -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go deleted file mode 100644 index bd6ad5efe..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "io" - "os" -) - -// This is an InputStream that is loaded from a file all at once -// when you construct the object. - -type FileStream struct { - *InputStream - - filename string -} - -func NewFileStream(fileName string) (*FileStream, error) { - - buf := bytes.NewBuffer(nil) - - f, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer f.Close() - _, err = io.Copy(buf, f) - if err != nil { - return nil, err - } - - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) - - fs.InputStream = NewInputStream(s) - - return fs, nil - -} - -func (f *FileStream) GetSourceName() string { - return f.filename -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go deleted file mode 100644 index a8b889ced..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "The executor tracks position information for position-dependent lexer actions -// efficiently, ensuring that actions appearing only at the end of the rule do -// not cause bloating of the {@link DFA} created for the lexer.
- -type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int -} - -func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { - - if lexerActions == nil { - lexerActions = make([]LexerAction, 0) - } - - l := new(LexerActionExecutor) - - l.lexerActions = lexerActions - - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) - for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) - } - - return l -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { - if lexerActionExecutor == nil { - return NewLexerActionExecutor([]LexerAction{lexerAction}) - } - - return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) -} - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end -// of a lexer rule, even when the lexer rule Matches a variable number of -// characters.
-// -//Prior to traversing a Match transition in the ATN, the current offset -// from the token start index is assigned to all position-dependent lexer -// actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of -// lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.
-// -//If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.
-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { - var updatedLexerActions []LexerAction - for i := 0; i < len(l.lexerActions); i++ { - _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) - if l.lexerActions[i].getIsPositionDependent() && !ok { - if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } - } - - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) - } - } - if updatedLexerActions == nil { - return l - } - - return NewLexerActionExecutor(updatedLexerActions) -} - -// Execute the actions encapsulated by l executor within the context of a -// particular {@link Lexer}. -// -//This method calls {@link IntStream//seek} to set the position of the -// {@code input} {@link CharStream} prior to calling -// {@link LexerAction//execute} on a position-dependent action. Before the -// method returns, the input position will be restored to the same position -// it was in when the method was invoked.
-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When l method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { - requiresSeek := false - stopIndex := input.Index() - - defer func() { - if requiresSeek { - input.Seek(stopIndex) - } - }() - - for i := 0; i < len(l.lexerActions); i++ { - lexerAction := l.lexerActions[i] - if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { - offset := la.offset - input.Seek(startIndex + offset) - lexerAction = la.lexerAction - requiresSeek = (startIndex + offset) != stopIndex - } else if lexerAction.getIsPositionDependent() { - input.Seek(stopIndex) - requiresSeek = false - } - lexerAction.execute(lexer) - } -} - -func (l *LexerActionExecutor) Hash() int { - if l == nil { - // TODO: Why is this here? l should not be nil - return 61 - } - - // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode - return l.cachedHash -} - -func (l *LexerActionExecutor) Equals(other interface{}) bool { - if l == other { - return true - } - othert, ok := other.(*LexerActionExecutor) - if !ok { - return false - } - if othert == nil { - return false - } - if l.cachedHash != othert.cachedHash { - return false - } - if len(l.lexerActions) != len(othert.lexerActions) { - return false - } - return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { - return i.Equals(j) - }) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go deleted file mode 100644 index 76689615a..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type LL1Analyzer struct { - atn *ATN -} - -func NewLL1Analyzer(atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la -} - -// - Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -// -// / -const ( - LL1AnalyzerHitPred = TokenInvalidType -) - -// * -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. If the closure from transition -// i leads to a semantic predicate before Matching a symbol, the -// element at index i of the result will be {@code nil}. -// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. -func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { - if s == nil { - return nil - } - count := len(s.GetTransitions()) - look := make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { - look[alt] = nil - } - } - return look -} - -// * -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//If {@code ctx} is {@code nil} and the end of the rule containing -// {@code s} is reached, {@link Token//EPSILON} is added to the result set. -// If {@code ctx} is not {@code nil} and the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.
-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// / -func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { - r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext - if ctx != nil { - lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) - } - la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) - return r -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//If {@code ctx} is {@code nil} and {@code stopState} or the end of the -// rule containing {@code s} is reached, {@link Token//EPSILON} is added to -// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is -// {@code true} and {@code stopState} or the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.
-// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code nil} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code NewSetIf the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-// -// @param ttype the token type to Match -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -func (p *BaseParser) Match(ttype int) Token { - - t := p.GetCurrentToken() - - if t.GetTokenType() == ttype { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - - return t -} - -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. -// -//If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-// -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// a wildcard and the error strategy could not recover from the mismatched -// symbol - -func (p *BaseParser) MatchWildcard() Token { - t := p.GetCurrentToken() - if t.GetTokenType() > 0 { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - return t -} - -func (p *BaseParser) GetParserRuleContext() ParserRuleContext { - return p.ctx -} - -func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { - p.ctx = v -} - -func (p *BaseParser) GetParseListeners() []ParseTreeListener { - if p.parseListeners == nil { - return make([]ParseTreeListener, 0) - } - return p.parseListeners -} - -// Registers {@code listener} to receive events during the parsing process. -// -//To support output-preserving grammar transformations (including but not -// limited to left-recursion removal, automated left-factoring, and -// optimized code generation), calls to listener methods during the parse -// may differ substantially from calls made by -// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In -// particular, rule entry and exit events may occur in a different order -// during the parse than after the parser. In addition, calls to certain -// rule entry methods may be omitted.
-// -//With the following specific exceptions, calls to listener events are -// deterministic, i.e. for identical input the calls to listener -// methods will be the same.
-// -//If {@code listener} is {@code nil} or has not been added as a parse -// listener, p.method does nothing.
-// @param listener the listener to remove -func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { - - if p.parseListeners != nil { - - idx := -1 - for i, v := range p.parseListeners { - if v == listener { - idx = i - break - } - } - - if idx == -1 { - return - } - - // remove the listener from the slice - p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) - - if len(p.parseListeners) == 0 { - p.parseListeners = nil - } - } -} - -// Remove all parse listeners. -func (p *BaseParser) removeParseListeners() { - p.parseListeners = nil -} - -// Notify any parse listeners of an enter rule event. -func (p *BaseParser) TriggerEnterRuleEvent() { - if p.parseListeners != nil { - ctx := p.ctx - for _, listener := range p.parseListeners { - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) - } - } -} - -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -func (p *BaseParser) TriggerExitRuleEvent() { - if p.parseListeners != nil { - // reverse order walk of listeners - ctx := p.ctx - l := len(p.parseListeners) - 1 - - for i := range p.parseListeners { - listener := p.parseListeners[l-i] - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) - } - } -} - -func (p *BaseParser) GetInterpreter() *ParserATNSimulator { - return p.Interpreter -} - -func (p *BaseParser) GetATN() *ATN { - return p.Interpreter.atn -} - -func (p *BaseParser) GetTokenFactory() TokenFactory { - return p.input.GetTokenSource().GetTokenFactory() -} - -// Tell our token source and error strategy about a Newway to create tokens.// -func (p *BaseParser) setTokenFactory(factory TokenFactory) { - p.input.GetTokenSource().setTokenFactory(factory) -} - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. -func (p *BaseParser) GetATNWithBypassAlts() { - - // TODO - panic("Not implemented!") - - // serializedAtn := p.getSerializedATN() - // if (serializedAtn == nil) { - // panic("The current parser does not support an ATN with bypass alternatives.") - // } - // result := p.bypassAltsAtnCache[serializedAtn] - // if (result == nil) { - // deserializationOptions := NewATNDeserializationOptions(nil) - // deserializationOptions.generateRuleBypassTransitions = true - // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) - // p.bypassAltsAtnCache[serializedAtn] = result - // } - // return result -} - -// The preferred method of getting a tree pattern. For example, here's a -// sample use: -// -//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-//
-
-func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
-
- panic("NewParseTreePatternMatcher not implemented!")
- //
- // if (lexer == nil) {
- // if (p.GetTokenStream() != nil) {
- // tokenSource := p.GetTokenStream().GetTokenSource()
- // if _, ok := tokenSource.(ILexer); ok {
- // lexer = tokenSource
- // }
- // }
- // }
- // if (lexer == nil) {
- // panic("Parser can't discover a lexer to use")
- // }
-
- // m := NewParseTreePatternMatcher(lexer, p)
- // return m.compile(pattern, patternRuleIndex)
-}
-
-func (p *BaseParser) GetInputStream() IntStream {
- return p.GetTokenStream()
-}
-
-func (p *BaseParser) SetInputStream(input TokenStream) {
- p.SetTokenStream(input)
-}
-
-func (p *BaseParser) GetTokenStream() TokenStream {
- return p.input
-}
-
-// Set the token stream and reset the parser.//
-func (p *BaseParser) SetTokenStream(input TokenStream) {
- p.input = nil
- p.reset()
- p.input = input
-}
-
-// Match needs to return the current input symbol, which gets put
-// into the label for the associated token ref e.g., x=ID.
-func (p *BaseParser) GetCurrentToken() Token {
- return p.input.LT(1)
-}
-
-func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
- if offendingToken == nil {
- offendingToken = p.GetCurrentToken()
- }
- p._SyntaxErrors++
- line := offendingToken.GetLine()
- column := offendingToken.GetColumn()
- listener := p.GetErrorListenerDispatch()
- listener.SyntaxError(p, offendingToken, line, column, msg, err)
-}
-
-func (p *BaseParser) Consume() Token {
- o := p.GetCurrentToken()
- if o.GetTokenType() != TokenEOF {
- p.GetInputStream().Consume()
- }
- hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
- if p.BuildParseTrees || hasListener {
- if p.errHandler.InErrorRecoveryMode(p) {
- node := p.ctx.AddErrorNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitErrorNode(node)
- }
- }
-
- } else {
- node := p.ctx.AddTokenNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitTerminal(node)
- }
- }
- }
- // node.invokingState = p.state
- }
-
- return o
-}
-
-func (p *BaseParser) addContextToParseTree() {
- // add current context to parent if we have a parent
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
- }
-}
-
-func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
- p.SetState(state)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.BuildParseTrees {
- p.addContextToParseTree()
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent()
- }
-}
-
-func (p *BaseParser) ExitRule() {
- p.ctx.SetStop(p.input.LT(-1))
- // trigger event on ctx, before it reverts to parent
- if p.parseListeners != nil {
- p.TriggerExitRuleEvent()
- }
- p.SetState(p.ctx.GetInvokingState())
- if p.ctx.GetParent() != nil {
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- } else {
- p.ctx = nil
- }
-}
-
-func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
- localctx.SetAltNumber(altNum)
- // if we have Newlocalctx, make sure we replace existing ctx
- // that is previous child of parse tree
- if p.BuildParseTrees && p.ctx != localctx {
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
- p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
- }
- }
- p.ctx = localctx
-}
-
-// Get the precedence level for the top-most precedence rule.
-//
-// @return The precedence level for the top-most precedence rule, or -1 if
-// the parser context is not nested within a precedence rule.
-
-func (p *BaseParser) GetPrecedence() int {
- if len(p.precedenceStack) == 0 {
- return -1
- }
-
- return p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
- p.SetState(state)
- p.precedenceStack.Push(precedence)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-//
-// Like {@link //EnterRule} but for recursive rules.
-
-func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
- previous := p.ctx
- previous.SetParent(localctx)
- previous.SetInvokingState(state)
- previous.SetStop(p.input.LT(-1))
-
- p.ctx = localctx
- p.ctx.SetStart(previous.GetStart())
- if p.BuildParseTrees {
- p.ctx.AddChild(previous)
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
- p.precedenceStack.Pop()
- p.ctx.SetStop(p.input.LT(-1))
- retCtx := p.ctx // save current ctx (return value)
- // unroll so ctx is as it was before call to recursive method
- if p.parseListeners != nil {
- for p.ctx != parentCtx {
- p.TriggerExitRuleEvent()
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- }
- } else {
- p.ctx = parentCtx
- }
- // hook into tree
- retCtx.SetParent(parentCtx)
- if p.BuildParseTrees && parentCtx != nil {
- // add return ctx into invoking rule's tree
- parentCtx.AddChild(retCtx)
- }
-}
-
-func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
- ctx := p.ctx
- for ctx != nil {
- if ctx.GetRuleIndex() == ruleIndex {
- return ctx
- }
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- return nil
-}
-
-func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
- return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) inContext(context ParserRuleContext) bool {
- // TODO: useful in parser?
- return false
-}
-
-//
-// Checks whether or not {@code symbol} can follow the current state in the
-// ATN. The behavior of p.method is equivalent to the following, but is
-// implemented such that the complete context-sensitive follow set does not
-// need to be explicitly constructed.
-//
-// -// return getExpectedTokens().contains(symbol) -//-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. - -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.states.Len() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. -func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go deleted file mode 100644 index 8bcc46a0d..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go +++ /dev/null @@ -1,1559 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorTraceATNSim = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false - TurnOffLRLoopEntryBranchOpt = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. - // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - var s0 *DFAState - p.atn.stateMu.RLock() - if dfa.getPrecedenceDfa() { - p.atn.edgeMu.RLock() - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - p.atn.edgeMu.RUnlock() - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.getS0() - } - p.atn.stateMu.RUnlock() - - if s0 == nil { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - if ParserATNSimulatorDebug { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) - - p.atn.stateMu.Lock() - if dfa.getPrecedenceDfa() { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - dfa.s0.configs = s0Closure - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - p.atn.edgeMu.Lock() - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - p.atn.edgeMu.Unlock() - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setS0(s0) - } - p.atn.stateMu.Unlock() - } - - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) -// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - ", DFA state " + s0.String() + - ", LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - - switch alts.length() { - case 0: - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - case 1: - return alts.minValue() - default: - // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. - p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - if t+1 < 0 { - return nil - } - - p.atn.edgeMu.RLock() - defer p.atn.edgeMu.RUnlock() - edges := previousD.getEdges() - if edges == nil || t+1 >= len(edges) { - return nil - } - return previousD.getIthEdge(t + 1) -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. - -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. - nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } - if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. - - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var skippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - if _, ok := c.GetState().(*RuleStopState); ok { - if fullCtx || t == TokenEOF { - skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for _, trans := range c.GetState().GetTransitions() { - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - - // Now figure out where the reach operation can take us... - var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. - // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if skippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - treatEOFAsEpsilon := t == TokenEOF - amount := len(intermediate.configs) - for k := 0; k < amount; k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(skippedStopStates); l++ { - reach.Add(skippedStopStates[l], p.mergeCache) - } - } - - if ParserATNSimulatorTraceATNSim { - fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) - } - - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//
When {@code lookToEndOfRule} is true, p method uses -// {@link ATN//NextTokens} for each configuration in {@code configs} which is -// not already in a rule stop state to see if a rule stop state is reachable -// from the configuration via epsilon-only transitions.
-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - if _, ok := config.GetState().(*RuleStopState); ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("computeStartState from ATN state " + a.String() + - " initialContext=" + initialContext.String()) - } - - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation -// process applies the following changes to the start state's configuration -// set. -// -//-// The prediction context must be considered by p filter to address -// situations like the following. -//
-//
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-//
-// -// If the above grammar, the ATN state immediately before the token -// reference {@code 'a'} in {@code letterA} is reachable from the left edge -// of both the primary and closure blocks of the left-recursive rule -// {@code statement}. The prediction context associated with each of these -// configurations distinguishes between them, and prevents the alternative -// which stepped out to {@code prog} (and then back in to {@code statement} -// from being eliminated by the filter. -//
-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). - if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.Equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i <= nalts; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. -// -//-// The default implementation of p method uses the following -// algorithm to identify an ATN configuration which successfully parsed the -// decision entry rule. Choosing such an alternative ensures that the -// {@link ParserRuleContext} returned by the calling rule will be complete -// and valid, and the syntax error will be Reported later at a more -// localized location.
-// -//-// In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in -// the parser. Specifically, p could occur if the only configuration -// capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing p alternative within -// {@link //AdaptivePredict} instead of panicing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific -// predicate which is preventing the parser from successfully parsing the -// decision rule, which helps developers identify and correct logic errors -// in semantic predicates. -//
-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. -func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if ParserATNSimulatorTraceATNSim { - fmt.Println("closure(" + config.String() + ")") - //fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - if _, ok := config.GetState().(*RuleStopState); ok { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. - c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { - continue - } - - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. - - if p.dfa != nil && p.dfa.getPrecedenceDfa() { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for right-recursive rules - continue - } - - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else { - - if !t.getIsEpsilon() { - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for EOF* and EOF+ - continue - } - } - if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { - if TurnOffLRLoopEntryBranchOpt { - return false - } - - _p := config.GetState() - - // First check to see if we are in StarLoopEntryState generated during - // left-recursion elimination. For efficiency, also check if - // the context has an empty stack case. If so, it would mean - // global FOLLOW so we can't perform optimization - if _p.GetStateType() != ATNStateStarLoopEntry { - return false - } - startLoop, ok := _p.(*StarLoopEntryState) - if !ok { - return false - } - if !startLoop.precedenceRuleDecision || - config.GetContext().isEmpty() || - config.GetContext().hasEmptyPath() { - return false - } - - // Require all return states to return back to the same rule - // that p is in. - numCtxs := config.GetContext().length() - for i := 0; i < numCtxs; i++ { - returnState := p.atn.states[config.GetContext().getReturnState(i)] - if returnState.GetRuleIndex() != _p.GetRuleIndex() { - return false - } - } - x := _p.GetTransitions()[0].getTarget() - decisionStartState := x.(BlockStartState) - blockEndStateNum := decisionStartState.getEndState().stateNumber - blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) - - // Verify that the top of each stack context leads to loop entry/exit - // state through epsilon edges and w/o leaving rule. - - for i := 0; i < numCtxs; i++ { // for each stack context - returnStateNumber := config.GetContext().getReturnState(i) - returnState := p.atn.states[returnStateNumber] - - // all states must have single outgoing epsilon edge - if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { - return false - } - - // Look for prefix op case like 'not expr', (' type ')' expr - returnStateTarget := returnState.GetTransitions()[0].getTarget() - if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { - continue - } - - // Look for 'expr op expr' or case where expr's return state is block end - // of (...)* internal block; the block end points to loop back - // which points to p but we don't need to check that - if returnState == blockEndState { - continue - } - - // Look for ternary expr ? expr : expr. The return state points at block end, - // which points at loop entry state - if returnStateTarget == blockEndState { - continue - } - - // Look for complex prefix 'between expr and expr' case where 2nd expr's - // return state points at block end state of (...)* internal block - if returnStateTarget.GetStateType() == ATNStateBlockEnd && - len(returnStateTarget.GetTransitions()) == 1 && - returnStateTarget.GetTransitions()[0].getIsEpsilon() && - returnStateTarget.GetTransitions()[0].getTarget() == _p { - continue - } - - // anything else ain't conforming - return false - } - - return true -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - var sb strings.Builder - sb.Grow(32) - - sb.WriteString("If {@code to} is {@code nil}, p method returns {@code nil}. -// Otherwise, p method returns the {@link DFAState} returned by calling -// {@link //addDFAState} for the {@code to} state.
-// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} -func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) - } - if to == nil { - return nil - } - p.atn.stateMu.Lock() - to = p.addDFAState(dfa, to) // used existing if possible not incoming - p.atn.stateMu.Unlock() - if from == nil || t < -1 || t > p.atn.maxTokenType { - return to - } - p.atn.edgeMu.Lock() - if from.getEdges() == nil { - from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) - } - from.setIthEdge(t+1, to) // connect - p.atn.edgeMu.Unlock() - - if ParserATNSimulatorDebug { - var names []string - if p.parser != nil { - names = p.parser.GetLiteralNames() - } - - fmt.Println("DFA=\n" + dfa.String(names, nil)) - } - return to -} - -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. -// -//If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and -// does not change the DFA.
-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - existing, present := dfa.states.Get(d) - if present { - if ParserATNSimulatorTraceATNSim { - fmt.Print("addDFAState " + d.String() + " exists") - } - return existing - } - - // The state was not present, so update it with configs - // - d.stateNumber = dfa.states.Len() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.states.Put(d) - if ParserATNSimulatorTraceATNSim { - fmt.Println("addDFAState new " + d.String()) - } - - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go deleted file mode 100644 index ba62af361..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "golang.org/x/exp/slices" - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. -// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - Hash() int - Equals(interface{}) bool - GetParent(int) PredictionContext - getReturnState(int) int - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.Hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -var _emptyPredictionContextHash int - -func init() { - _emptyPredictionContextHash = murmurInit(1) - _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) -} - -func calculateEmptyHash() int { - return _emptyPredictionContextHash -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. -func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - var cachedHash int - if parent != nil { - cachedHash = calculateHash(parent, returnState) - } else { - cachedHash = calculateEmptyHash() - } - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(cachedHash) - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { - if b == other { - return true - } - if _, ok := other.(*BaseSingletonPredictionContext); !ok { - return false - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != otherP.getReturnState(0) { - return false - } - if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.Equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - p.cachedHash = calculateEmptyHash() - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) Hash() int { - return e.cachedHash -} - -func (e *EmptyPredictionContext) Equals(other interface{}) bool { - return e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - hash := murmurInit(1) - - for _, parent := range parents { - hash = murmurUpdate(hash, parent.Hash()) - } - - for _, returnState := range returnStates { - hash = murmurUpdate(hash, returnState) - } - - hash = murmurFinish(hash, len(parents)<<1) - - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(hash) - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -// Equals is the default comparison function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Equals(o interface{}) bool { - if a == o { - return true - } - other, ok := o.(*ArrayPredictionContext) - if !ok { - return false - } - if a.cachedHash != other.Hash() { - return false // can't be same if hash is different - } - - // Must compare the actual array elements and not just the array address - // - return slices.Equal(a.returnStates, other.returnStates) && - slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { - return x.Equals(y) - }) -} - -// Hash is the default hash function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Hash() int { - return a.BasePredictionContext.cachedHash -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. (if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - - // Share same graph if both same - // - if a == b || a.Equals(b) { - return a - } - - // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test - // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created - // from it. - // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion - // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from - // either of them. - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - - // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters - // here. - // - // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here - - var arp, arb *ArrayPredictionContext - var ok bool - if arp, ok = a.(*ArrayPredictionContext); ok { - } else if _, ok = a.(*BaseSingletonPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } else if _, ok = a.(*EmptyPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - if arb, ok = b.(*ArrayPredictionContext); ok { - } else if _, ok = b.(*BaseSingletonPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } else if _, ok = b.(*EmptyPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - // Both arp and arb - return mergeArrays(arp, arb, rootIsWildcard, mergeCache) -} - -// Merge two {@link SingletonBasePredictionContext} instances. -// -//Stack tops equal, parents merge is same return left graph.
-//
Same stack top, parents differ merge parents giving array node, then
-// remainders of those graphs. A Newroot node is created to point to the
-// merged parents.
-//
Different stack tops pointing to same parent. Make array node for the
-// root where both element in the root point to the same (original)
-// parent.
-//
Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
-//
These local-context merge operations are used when {@code rootIsWildcard} -// is true.
-// -//{@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//
{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.
-//
Special case of last merge if local context.
-//
These full-context merge operations are used when {@code rootIsWildcard} -// is false.
-// -// -// -//Must keep all contexts {@link //EMPTY} in array is a special value (and
-// nil parent).
-//
Different tops, different parents.
-//
Shared top, same parents.
-//
Shared top, different parents.
-//
Shared top, all shared parents.
-//
Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//
- // When using this prediction mode, the parser will either return a correct - // parse tree (i.e. the same parse tree that would be returned with the - // {@link //LL} prediction mode), or it will Report a syntax error. If a - // syntax error is encountered when using the {@link //SLL} prediction mode, - // it may be due to either an actual syntax error in the input or indicate - // that the particular combination of grammar and input requires the more - // powerful {@link //LL} prediction abilities to complete successfully.
- // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //- // When using this prediction mode, the parser will make correct decisions - // for all syntactically-correct grammar and input combinations. However, in - // cases where the grammar is truly ambiguous this prediction mode might not - // Report a precise answer for exactly which alternatives are - // ambiguous.
- // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //- // This prediction mode may be used for diagnosing ambiguities during - // grammar development. Due to the performance overhead of calculating sets - // of ambiguous alternatives, this prediction mode should be avoided when - // the exact results are not necessary.
- // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
- // - PredictionModeLLExactAmbigDetection = 2 -) - -// Computes the SLL prediction termination condition. -// -//-// This method computes the SLL prediction termination condition for both of -// the following cases.
-// -//COMBINED SLL+LL PARSING
-// -//When LL-fallback is enabled upon SLL conflict, correct predictions are -// ensured regardless of how the termination condition is computed by this -// method. Due to the substantially higher cost of LL prediction, the -// prediction should only fall back to LL when the additional lookahead -// cannot lead to a unique SLL prediction.
-// -//Assuming combined SLL+LL parsing, an SLL configuration set with only -// conflicting subsets should fall back to full LL, even if the -// configuration sets don't resolve to the same alternative (e.g. -// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting -// configuration, SLL could continue with the hopes that more lookahead will -// resolve via one of those non-conflicting configurations.
-// -//Here's the prediction termination rule them: SLL (for SLL+LL parsing) -// stops when it sees only conflicting configuration subsets. In contrast, -// full LL keeps going when there is uncertainty.
-// -//HEURISTIC
-// -//As a heuristic, we stop prediction when we see any conflicting subset -// unless we see a state that only has one alternative associated with it. -// The single-alt-state thing lets prediction continue upon rules like -// (otherwise, it would admit defeat too soon):
-// -//{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
-// -//When the ATN simulation reaches the state before {@code ”}, it has a -// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally -// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop -// processing this node because alternative to has another way to continue, -// via {@code [6|2|[]]}.
-// -//It also let's us continue for this rule:
-// -//{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-// -//After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not stop -// working on this state. In the previous example, we're concerned with -// states associated with the conflicting alternatives. Here alt 3 is not -// associated with the conflicting configs, but since we can continue -// looking for input reasonably, don't declare the state done.
-// -//PURE SLL PARSING
-// -//To handle pure SLL parsing, all we have to do is make sure that we -// combine stack contexts for configurations that differ only by semantic -// predicate. From there, we can do the usual SLL termination heuristic.
-// -//PREDICATES IN SLL+LL PARSING
-// -//SLL decisions don't evaluate predicates until after they reach DFA stop -// states because they need to create the DFA cache that works in all -// semantic situations. In contrast, full LL evaluates predicates collected -// during start state computation so it can ignore predicates thereafter. -// This means that SLL termination detection can totally ignore semantic -// predicates.
-// -//Implementation-wise, {@link ATNConfigSet} combines stack contexts but not -// semantic predicate contexts so we might see two configurations like the -// following.
-// -//{@code (s, 1, x, {}), (s, 1, x', {p})}
-// -//Before testing these configurations against others, we have to merge -// {@code x} and {@code x'} (without modifying the existing configurations). -// For example, we test {@code (x+x')==x”} when looking for conflicts in -// the following configurations.
-// -//{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
-// -//If the configuration set has predicates (as indicated by -// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of -// the configurations to strip out all of the predicates so that a standard -// {@link ATNConfigSet} will merge everything ignoring predicates.
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// Full LL prediction termination. -// -//Can we stop looking ahead during ATN simulation or is there some -// uncertainty as to which alternative we will ultimately pick, after -// consuming more input? Even if there are partial conflicts, we might know -// that everything is going to resolve to the same minimum alternative. That -// means we can stop since no more lookahead will change that fact. On the -// other hand, there might be multiple conflicts that resolve to different -// minimums. That means we need more look ahead to decide which of those -// alternatives we should predict.
-// -//The basic idea is to split the set of configurations {@code C}, into -// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with -// non-conflicting configurations. Two configurations conflict if they have -// identical {@link ATNConfig//state} and {@link ATNConfig//context} values -// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} -// and {@code (s, j, ctx, _)} for {@code i!=j}.
-// -//Reduce these configuration subsets to the set of possible alternatives. -// You can compute the alternative subsets in one pass as follows:
-// -//{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in -// {@code C} holding {@code s} and {@code ctx} fixed.
-// -//Or in pseudo-code, for each configuration {@code c} in {@code C}:
-// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-// -//If {@code |A_s,ctx|=1} then there is no conflict associated with -// {@code s} and {@code ctx}.
-// -//Reduce the subsets to singletons by choosing a minimum of each subset. If -// the union of these alternative subsets is a singleton, then no amount of -// more lookahead will help us. We will always pick that alternative. If, -// however, there is more than one alternative, then we are uncertain which -// alternative to predict and must continue looking for resolution. We may -// or may not discover an ambiguity in the future, even if there are no -// conflicting subsets this round.
-// -//The biggest sin is to terminate early because it means we've made a -// decision but were uncertain as to the eventual outcome. We haven't used -// enough lookahead. On the other hand, announcing a conflict too late is no -// big deal you will still have the conflict. It's just inefficient. It -// might even look until the end of file.
-// -//No special consideration for semantic predicates is required because -// predicates are evaluated on-the-fly for full LL prediction, ensuring that -// no configuration contains a semantic context during the termination -// check.
-// -//CONFLICTING CONFIGS
-// -//Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict -// when {@code i!=j} but {@code x=x'}. Because we merge all -// {@code (s, i, _)} configurations together, that means that there are at -// most {@code n} configurations associated with state {@code s} for -// {@code n} possible alternatives in the decision. The merged stacks -// complicate the comparison of configuration contexts {@code x} and -// {@code x'}. Sam checks to see if one is a subset of the other by calling -// merge and checking to see if the merged result is either {@code x} or -// {@code x'}. If the {@code x} associated with lowest alternative {@code i} -// is the superset, then {@code i} is the only possible prediction since the -// others resolve to {@code min(i)} as well. However, if {@code x} is -// associated with {@code j>i} then at least one stack configuration for -// {@code j} is not in conflict with alternative {@code i}. The algorithm -// should keep going, looking for more lookahead due to the uncertainty.
-// -//For simplicity, I'm doing a equality check between {@code x} and -// {@code x'} that lets the algorithm continue to consume lookahead longer -// than necessary. The reason I like the equality is of course the -// simplicity but also because that is the test you need to detect the -// alternatives that are actually in conflict.
-// -//CONTINUE/STOP RULE
-// -//Continue if union of resolved alternative sets from non-conflicting and -// conflicting alternative subsets has more than one alternative. We are -// uncertain about which alternative to predict.
-// -//The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which -// alternatives are still in the running for the amount of input we've -// consumed at this point. The conflicting sets let us to strip away -// configurations that won't lead to more states because we resolve -// conflicts to the configuration with a minimum alternate for the -// conflicting set.
-// -//CASES
-// -//EXACT AMBIGUITY DETECTION
-// -//If all states Report the same conflicting set of alternatives, then we -// know we have the exact ambiguity set.
-// -//|A_i|>1 and
-// A_i = A_j for all i, j.
In other words, we continue examining lookahead until all {@code A_i} -// have more than one alternative and all {@code A_i} are the same. If -// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate -// because the resolved set is {@code {1}}. To determine what the real -// ambiguity is, we have to know whether the ambiguity is between one and -// two or one and three so we keep going. We can only stop prediction when -// we need exact ambiguity detection when the sets look like -// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
- configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
-
- for _, c := range configs.GetItems() {
-
- alts, ok := configToAlts.Get(c)
- if !ok {
- alts = NewBitSet()
- configToAlts.Put(c, alts)
- }
- alts.add(c.GetAlt())
- }
-
- return configToAlts.Values()
-}
-
-// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
-// configuration {@code c} in {@code configs}:
-//
-//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-//
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
- m := NewAltDict()
-
- for _, c := range configs.GetItems() {
- alts := m.Get(c.GetState().String())
- if alts == nil {
- alts = NewBitSet()
- m.put(c.GetState().String(), alts)
- }
- alts.(*BitSet).add(c.GetAlt())
- }
- return m
-}
-
-func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
- values := PredictionModeGetStateToAltMap(configs).values()
- for i := 0; i < len(values); i++ {
- if values[i].(*BitSet).length() == 1 {
- return true
- }
- }
- return false
-}
-
-func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
- result := ATNInvalidAltNumber
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- minAlt := alts.minValue()
- if result == ATNInvalidAltNumber {
- result = minAlt
- } else if result != minAlt { // more than 1 viable alt
- return ATNInvalidAltNumber
- }
- }
- return result
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
deleted file mode 100644
index bfe542d09..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strings"
-
- "strconv"
-)
-
-type Recognizer interface {
- GetLiteralNames() []string
- GetSymbolicNames() []string
- GetRuleNames() []string
-
- Sempred(RuleContext, int, int) bool
- Precpred(RuleContext, int) bool
-
- GetState() int
- SetState(int)
- Action(RuleContext, int, int)
- AddErrorListener(ErrorListener)
- RemoveErrorListeners()
- GetATN() *ATN
- GetErrorListenerDispatch() ErrorListener
-}
-
-type BaseRecognizer struct {
- listeners []ErrorListener
- state int
-
- RuleNames []string
- LiteralNames []string
- SymbolicNames []string
- GrammarFileName string
-}
-
-func NewBaseRecognizer() *BaseRecognizer {
- rec := new(BaseRecognizer)
- rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
- rec.state = -1
- return rec
-}
-
-var tokenTypeMapCache = make(map[string]int)
-var ruleIndexMapCache = make(map[string]int)
-
-func (b *BaseRecognizer) checkVersion(toolVersion string) {
- runtimeVersion := "4.12.0"
- if runtimeVersion != toolVersion {
- fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
- }
-}
-
-func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
- panic("action not implemented on Recognizer!")
-}
-
-func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
- b.listeners = append(b.listeners, listener)
-}
-
-func (b *BaseRecognizer) RemoveErrorListeners() {
- b.listeners = make([]ErrorListener, 0)
-}
-
-func (b *BaseRecognizer) GetRuleNames() []string {
- return b.RuleNames
-}
-
-func (b *BaseRecognizer) GetTokenNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetSymbolicNames() []string {
- return b.SymbolicNames
-}
-
-func (b *BaseRecognizer) GetLiteralNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetState() int {
- return b.state
-}
-
-func (b *BaseRecognizer) SetState(v int) {
- b.state = v
-}
-
-//func (b *Recognizer) GetTokenTypeMap() {
-// var tokenNames = b.GetTokenNames()
-// if (tokenNames==nil) {
-// panic("The current recognizer does not provide a list of token names.")
-// }
-// var result = tokenTypeMapCache[tokenNames]
-// if(result==nil) {
-// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
-// result.EOF = TokenEOF
-// tokenTypeMapCache[tokenNames] = result
-// }
-// return result
-//}
-
-// Get a map from rule names to rule indexes.
-//
-// Used for XPath and tree pattern compilation.
-func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go deleted file mode 100644 index f73b06bc6..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type TokenSourceCharStreamPair struct { - tokenSource TokenSource - charStream CharStream -} - -// A token has properties: text, type, line, character position in the line -// (so we can ignore tabs), token channel, index, and source from which -// we obtained this token. - -type Token interface { - GetSource() *TokenSourceCharStreamPair - GetTokenType() int - GetChannel() int - GetStart() int - GetStop() int - GetLine() int - GetColumn() int - - GetText() string - SetText(s string) - - GetTokenIndex() int - SetTokenIndex(v int) - - GetTokenSource() TokenSource - GetInputStream() CharStream -} - -type BaseToken struct { - source *TokenSourceCharStreamPair - tokenType int // token type of the token - channel int // The parser ignores everything not on DEFAULT_CHANNEL - start int // optional return -1 if not implemented. - stop int // optional return -1 if not implemented. - tokenIndex int // from 0..n-1 of the token object in the input stream - line int // line=1..n of the 1st character - column int // beginning of the line at which it occurs, 0..n-1 - text string // text of the token. - readOnly bool -} - -const ( - TokenInvalidType = 0 - - // During lookahead operations, this "token" signifies we hit rule end ATN state - // and did not follow it despite needing to. - TokenEpsilon = -2 - - TokenMinUserTokenType = 1 - - TokenEOF = -1 - - // All tokens go to the parser (unless Skip() is called in that rule) - // on a particular "channel". The parser tunes to a particular channel - // so that whitespace etc... can go to the parser on a "hidden" channel. - - TokenDefaultChannel = 0 - - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. - - TokenHiddenChannel = 1 -) - -func (b *BaseToken) GetChannel() int { - return b.channel -} - -func (b *BaseToken) GetStart() int { - return b.start -} - -func (b *BaseToken) GetStop() int { - return b.stop -} - -func (b *BaseToken) GetLine() int { - return b.line -} - -func (b *BaseToken) GetColumn() int { - return b.column -} - -func (b *BaseToken) GetTokenType() int { - return b.tokenType -} - -func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { - return b.source -} - -func (b *BaseToken) GetTokenIndex() int { - return b.tokenIndex -} - -func (b *BaseToken) SetTokenIndex(v int) { - b.tokenIndex = v -} - -func (b *BaseToken) GetTokenSource() TokenSource { - return b.source.tokenSource -} - -func (b *BaseToken) GetInputStream() CharStream { - return b.source.charStream -} - -type CommonToken struct { - *BaseToken -} - -func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - - t := new(CommonToken) - - t.BaseToken = new(BaseToken) - - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 - if t.source.tokenSource != nil { - t.line = source.tokenSource.GetLine() - t.column = source.tokenSource.GetCharPositionInLine() - } else { - t.column = -1 - } - return t -} - -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. - -//CommonToken.EMPTY_SOURCE = [ nil, nil ] - -// Constructs a New{@link CommonToken} as a copy of another {@link Token}. -// -//
-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //GetText}, and {@link //source} -// will be constructed from the result of {@link Token//GetTokenSource} and -// {@link Token//GetInputStream}.
-// -// @param oldToken The token to copy. -func (c *CommonToken) clone() *CommonToken { - t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) - t.tokenIndex = c.GetTokenIndex() - t.line = c.GetLine() - t.column = c.GetColumn() - t.text = c.GetText() - return t -} - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "-// You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not -// moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and -// check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the -// buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)
-//- -// This rewriter makes no modifications to the token stream. It does not ask the -// stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.
- -//-// The rewriter only works on tokens that you have in the buffer and ignores the -// current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.
- -//-// Since the operations are done lazily at {@link #getText}-time, operations do -// not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.
- -//-// Because operations never actually alter the buffer, you may always get the -// original token stream back without undoing anything. Since the instructions -// are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,
- -//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-//
-
-// -// Then in the rules, you can execute (assuming rewriter is visible):
- -//-// Token t,u; -// ... -// rewriter.insertAfter(t, "text to put after t");} -// rewriter.insertAfter(u, "text after u");} -// System.out.println(rewriter.getText()); -//- -//
-// You can also have multiple "instruction streams" and get multiple rewrites -// from a single pass over the input. Just name the instruction streams and use -// that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:
- -//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-//
-
-// -// If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.
- -const ( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation) GetInstructionIndex() int { - return op.instruction_index -} - -func (op *BaseRewriteOperation) GetIndex() int { - return op.index -} - -func (op *BaseRewriteOperation) GetText() string { - return op.text -} - -func (op *BaseRewriteOperation) GetOpName() string { - return op.op_name -} - -func (op *BaseRewriteOperation) GetTokens() TokenStream { - return op.tokens -} - -func (op *BaseRewriteOperation) SetInstructionIndex(val int) { - op.instruction_index = val -} - -func (op *BaseRewriteOperation) SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation) SetText(val string) { - op.text = val -} - -func (op *BaseRewriteOperation) SetOpName(val string) { - op.op_name = val -} - -func (op *BaseRewriteOperation) SetTokens(val TokenStream) { - op.tokens = val -} - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { - return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index, - text: text, - op_name: "InsertBeforeOp", - tokens: stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { - return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index + 1, - text: text, - tokens: stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. -type ReplaceOp struct { - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation: BaseRewriteOperation{ - index: from, - text: text, - op_name: "ReplaceOp", - tokens: stream, - }, - LastIndex: to, - } -} - -func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { - if op.text != "" { - buffer.WriteString(op.text) - } - return op.LastIndex + 1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("This is a one way link. It emanates from a state (usually via a list of -// transitions) and has a target state.
-// -//Since we never have to change the ATN transitions once we construct it, -// the states. We'll use the term Edge for the DFA to distinguish them from -// ATN transitions.
- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. - t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - var sb strings.Builder - sb.WriteByte('\'') - sb.WriteRune(rune(t.start)) - sb.WriteString("'..'") - sb.WriteRune(rune(t.stop)) - sb.WriteByte('\'') - return sb.String() -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string { - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) String() string { - return "." -} - -type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - - precedence int -} - -func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t -} - -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { - return NewPrecedencePredicate(t.precedence) -} - -func (t *PrecedencePredicateTransition) String() string { - return fmt.Sprint(t.precedence) + " >= _p" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go deleted file mode 100644 index 85b4f137b..000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. -/// - -var TreeInvalidInterval = NewInterval(-1, -2) - -type Tree interface { - GetParent() Tree - SetParent(Tree) - GetPayload() interface{} - GetChild(i int) Tree - GetChildCount() int - GetChildren() []Tree -} - -type SyntaxTree interface { - Tree - - GetSourceInterval() *Interval -} - -type ParseTree interface { - SyntaxTree - - Accept(Visitor ParseTreeVisitor) interface{} - GetText() string - - ToStringTree([]string, Recognizer) string -} - -type RuleNode interface { - ParseTree - - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext -} - -type TerminalNode interface { - ParseTree - - GetSymbol() Token -} - -type ErrorNode interface { - TerminalNode - - errorNode() -} - -type ParseTreeVisitor interface { - Visit(tree ParseTree) interface{} - VisitChildren(node RuleNode) interface{} - VisitTerminal(node TerminalNode) interface{} - VisitErrorNode(node ErrorNode) interface{} -} - -type BaseParseTreeVisitor struct{} - -var _ ParseTreeVisitor = &BaseParseTreeVisitor{} - -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } - -// TODO -//func (this ParseTreeVisitor) Visit(ctx) { -// if (Utils.isArray(ctx)) { -// self := this -// return ctx.map(function(child) { return VisitAtom(self, child)}) -// } else { -// return VisitAtom(this, ctx) -// } -//} -// -//func VisitAtom(Visitor, ctx) { -// if (ctx.parser == nil) { //is terminal -// return -// } -// -// name := ctx.parser.ruleNames[ctx.ruleIndex] -// funcName := "Visit" + Utils.titleCase(name) -// -// return Visitor[funcName](ctx) -//} - -type ParseTreeListener interface { - VisitTerminal(node TerminalNode) - VisitErrorNode(node ErrorNode) - EnterEveryRule(ctx ParserRuleContext) - ExitEveryRule(ctx ParserRuleContext) -} - -type BaseParseTreeListener struct{} - -var _ ParseTreeListener = &BaseParseTreeListener{} - -func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} - -type TerminalNodeImpl struct { - parentCtx RuleContext - - symbol Token -} - -var _ TerminalNode = &TerminalNodeImpl{} - -func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { - tn := new(TerminalNodeImpl) - - tn.parentCtx = nil - tn.symbol = symbol - - return tn -} - -func (t *TerminalNodeImpl) GetChild(i int) Tree { - return nil -} - -func (t *TerminalNodeImpl) GetChildren() []Tree { - return nil -} - -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { - panic("Cannot set children on terminal node") -} - -func (t *TerminalNodeImpl) GetSymbol() Token { - return t.symbol -} - -func (t *TerminalNodeImpl) GetParent() Tree { - return t.parentCtx -} - -func (t *TerminalNodeImpl) SetParent(tree Tree) { - t.parentCtx = tree.(RuleContext) -} - -func (t *TerminalNodeImpl) GetPayload() interface{} { - return t.symbol -} - -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { - if t.symbol == nil { - return TreeInvalidInterval - } - tokenIndex := t.symbol.GetTokenIndex() - return NewInterval(tokenIndex, tokenIndex) -} - -func (t *TerminalNodeImpl) GetChildCount() int { - return 0 -} - -func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitTerminal(t) -} - -func (t *TerminalNodeImpl) GetText() string { - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) String() string { - if t.symbol.GetTokenType() == TokenEOF { - return "The default implementation simply calls {@link //endErrorCondition} to +// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool { + return d.errorRecoveryMode +} + +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// ReportMatch is the default implementation of error matching and simply calls endErrorCondition. +func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// ReportError is the default implementation of error reporting. +// It returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls [beginErrorCondition] +// and dispatches the Reporting task based on the runtime type of e +// according to the following table. +// +// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative] +// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch] +// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate] +// All other types : Calls [NotifyErrorListeners] to Report the exception +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.InErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// Recover is the default recovery implementation. +// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set - +// loosely the set of tokens that can follow the current rule. +func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.GetErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// Sync is the default implementation of error strategy synchronization. +// +// This Sync makes sure that the current lookahead symbol is consistent with what were expecting +// at this point in the [ATN]. You can call this anytime but ANTLR only +// generates code to check before sub-rules/loops and each iteration. +// +// Implements [Jim Idle]'s magic Sync mechanism in closures and optional +// sub-rules. E.g.: +// +// a : Sync ( stuff Sync )* +// Sync : {consume to what can follow Sync} +// +// At the start of a sub-rule upon error, Sync performs single +// token deletion, if possible. If it can't do that, it bails on the current +// rule and uses the default error recovery, which consumes until the +// reSynchronization set of the current rule. +// +// If the sub-rule is optional +// +// ({@code (...)?}, {@code (...)*}, +// +// or a block with an empty alternative), then the expected set includes what follows +// the sub-rule. +// +// During loop iteration, it consumes until it sees a token that can start a +// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to +// stay in the loop as long as possible. +// +// # Origins +// +// Previous versions of ANTLR did a poor job of their recovery within loops. +// A single mismatch token or missing token would force the parser to bail +// out of the entire rules surrounding the loop. So, for rule: +// +// classfunc : 'class' ID '{' member* '}' +// +// input with an extra token between members would force the parser to +// consume until it found the next class definition rather than the next +// member definition of the current class. +// +// This functionality cost a bit of effort because the parser has to +// compare the token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off this +// functionality by simply overriding this method as empty: +// +// { } +// +// [Jim Idle]: https://github.com/jimidle +func (d *DefaultErrorStrategy) Sync(recognizer Parser) { + // If already recovering, don't try to Sync + if d.InErrorRecoveryMode(recognizer) { + return + } + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + la := recognizer.GetTokenStream().LA(1) + + // try cheaper subset first might get lucky. seems to shave a wee bit off + nextTokens := recognizer.GetATN().NextTokens(s, nil) + if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { + return + } + + switch s.GetStateType() { + case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: + // Report error and recover if possible + if d.SingleTokenDeletion(recognizer) != nil { + return + } + recognizer.SetError(NewInputMisMatchException(recognizer)) + case ATNStatePlusLoopBack, ATNStateStarLoopBack: + d.ReportUnwantedToken(recognizer) + expecting := NewIntervalSet() + expecting.addSet(recognizer.GetExpectedTokens()) + whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer)) + d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + default: + // do nothing if we can't identify the exact kind of ATN state + } +} + +// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException]. +// +// See also [ReportError] +func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { + tokens := recognizer.GetTokenStream() + var input string + if tokens != nil { + if e.startToken.GetTokenType() == TokenEOF { + input = "If the state number is not known, b method returns -1.
+ +// getExpectedTokens gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time this exception was raised. +// +// If the set of expected tokens is not known and could not be computed, +// this method returns nil. +// +// The func returns the set of token types that could potentially follow the current +// state in the {ATN}, or nil if the information is not available. + +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs *ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs *ATNConfigSet +} + +// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. +// +// Reported by [ReportNoViableAlternative] +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1) + n.deadEndConfigs = deadEndConfigs + + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. + // + // At the time the error occurred, of course the stream needs to keep a + // buffer of all the tokens, but later we might not have access to those. + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. +func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func (p ParseCancellationException) GetOffendingToken() Token { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetMessage() string { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetInputStream() IntStream { + //TODO implement me + panic("implement me") +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go new file mode 100644 index 000000000..5f65f809b --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. + +type FileStream struct { + InputStream + filename string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFileStream(fileName string) (*FileStream, error) { + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + + defer func(f *os.File) { + errF := f.Close() + if errF != nil { + } + }(f) + + reader := bufio.NewReader(f) + fInfo, err := f.Stat() + if err != nil { + return nil, err + } + + fs := &FileStream{ + InputStream: InputStream{ + index: 0, + name: fileName, + }, + filename: fileName, + } + + // Pre-build the buffer and read runes efficiently + // + fs.data = make([]rune, 0, fInfo.Size()) + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + fs.data = append(fs.data, r) + } + fs.size = len(fs.data) // Size in runes + + // All done. + // + return fs, nil +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go new file mode 100644 index 000000000..b737fe85f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -0,0 +1,157 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "io" +) + +type InputStream struct { + name string + index int + data []rune + size int +} + +// NewIoStream creates a new input stream from the given io.Reader reader. +// Note that the reader is read completely into memory and so it must actually +// have a stopping point - you cannot pass in a reader on an open-ended source such +// as a socket for instance. +func NewIoStream(reader io.Reader) *InputStream { + + rReader := bufio.NewReader(reader) + + is := &InputStream{ + name: "The {@code Skip} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The Skip command does not have any parameters, so this action is +// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE]. type LexerSkipAction struct { *BaseLexerAction } @@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction { return la } -// Provides a singleton instance of l parameterless lexer action. +// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. var LexerSkipActionINSTANCE = NewLexerSkipAction() func (l *LexerSkipAction) execute(lexer Lexer) { lexer.Skip() } +// String returns a string representation of the current [LexerSkipAction]. func (l *LexerSkipAction) String() string { return "skip" } +func (b *LexerSkipAction) Equals(other LexerAction) bool { + return other.getActionType() == LexerActionTypeSkip +} + // Implements the {@code type} lexer action by calling {@link Lexer//setType} // // with the assigned type. @@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string { return "actionType(" + strconv.Itoa(l.thetype) + ")" } -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. +// LexerPushModeAction implements the pushMode lexer action by calling +// [Lexer.pushMode] with the assigned mode. type LexerPushModeAction struct { *BaseLexerAction - mode int } @@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string { return "pushMode(" + strconv.Itoa(l.mode) + ")" } -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. // -//The {@code popMode} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The popMode command does not have any parameters, so this action is +// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] type LexerPopModeAction struct { *BaseLexerAction } @@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string { return "more" } -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with // the assigned mode. type LexerModeAction struct { *BaseLexerAction - mode int } @@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool { } } -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. +// LexerChannelAction implements the channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. type LexerChannelAction struct { *BaseLexerAction - channel int } +// NewLexerChannelAction creates a channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. func NewLexerChannelAction(channel int) *LexerChannelAction { l := new(LexerChannelAction) l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) @@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string { // lexer actions, see {@link LexerActionExecutor//append} and // {@link LexerActionExecutor//fixOffsetBeforeMatch}. -// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.
-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. type LexerIndexedCustomAction struct { *BaseLexerAction - offset int lexerAction LexerAction isPositionDependent bool } +// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset +// with a [LexerAction]. +// +// Note: This class is only required for lexer actions for which +// [LexerAction.isPositionDependent] returns true. +// +// The offset points into the input [CharStream], relative to +// the token start index, at which the specified lexerAction should be +// executed. func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { l := new(LexerIndexedCustomAction) diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go new file mode 100644 index 000000000..dfc28c32b --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "golang.org/x/exp/slices" + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//The executor tracks position information for position-dependent lexer actions +// efficiently, ensuring that actions appearing only at the end of the rule do +// not cause bloating of the {@link DFA} created for the lexer.
+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link ATNConfig//hashCode} operation. + l.cachedHash = murmurInit(0) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) + } + l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) + + return l +} + +// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for +// the input [LexerActionExecutor] followed by a specified +// [LexerAction]. +// TODO: This does not match the Java code +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset +// for position-dependent lexer actions. +// +// Normally, when the executor encounters lexer actions where +// [LexerAction.isPositionDependent] returns true, it calls +// [IntStream.Seek] on the input [CharStream] to set the input +// position to the end of the current token. This behavior provides +// for efficient [DFA] representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters. +// +// Prior to traversing a Match transition in the [ATN], the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the [DFA] representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same Length, regardless of their absolute +// position in the input stream. +// +// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns this instance. +// +// The offset is assigned to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// The func returns a [LexerActionExecutor] that stores input stream offsets +// for all position-dependent lexer actions. +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) + updatedLexerActions = append(updatedLexerActions, l.lexerActions...) + } + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//This method calls {@link IntStream//seek} to set the position of the +// {@code input} {@link CharStream} prior to calling +// {@link LexerAction//execute} on a position-dependent action. Before the +// method returns, the input position will be restored to the same position +// it was in when the method was invoked.
+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) Hash() int { + if l == nil { + // TODO: Why is this here? l should not be nil + return 61 + } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode + return l.cachedHash +} + +func (l *LexerActionExecutor) Equals(other interface{}) bool { + if l == other { + return true + } + othert, ok := other.(*LexerActionExecutor) + if !ok { + return false + } + if othert == nil { + return false + } + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go similarity index 80% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go index c573b7521..fe938b025 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go @@ -10,10 +10,8 @@ import ( "strings" ) +//goland:noinspection GoUnusedGlobalVariable var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - LexerATNSimulatorMinDFAEdge = 0 LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN @@ -32,11 +30,11 @@ type ILexerATNSimulator interface { } type LexerATNSimulator struct { - *BaseATNSimulator + BaseATNSimulator recog Lexer predictionMode int - mergeCache DoubleDict + mergeCache *JPCMap2 startIndex int Line int CharPositionInLine int @@ -46,27 +44,35 @@ type LexerATNSimulator struct { } func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + l := &LexerATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } l.decisionToDFA = decisionToDFA l.recog = recog + // The current token's starting index into the character stream. // Shared across DFA to ATN simulation in case the ATN fails and the // DFA did not have a previous accept state. In l case, we use the // ATN-generated exception object. l.startIndex = -1 - // line number 1..n within the input/// + + // line number 1..n within the input l.Line = 1 + // The index of the character relative to the beginning of the line - // 0..n-1/// + // 0..n-1 l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration // info l.prevAccept = NewSimState() - // done + return l } @@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() { func (l *LexerATNSimulator) MatchATN(input CharStream) int { startState := l.atn.modeToStartState[l.mode] - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) } oldMode := l.mode @@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { predict := l.execATN(input, next) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) } return predict @@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("start state closure=" + ds0.configs.String()) } if ds0.isAcceptState { - // allow zero-length tokens + // allow zero-Length tokens l.captureSimState(l.prevAccept, input, ds0) } t := input.LA(1) s := ds0 // s is current/from DFA state for { // while more work - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("execATN loop starting closure: " + s.configs.String()) } @@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { } } t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state + s = target // flip current DFA target becomes new src/from state } return l.failOrAccept(l.prevAccept, input, s.configs, t) @@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState return nil } target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) - if LexerATNSimulatorDebug && target != nil { + if runtimeConfig.lexerATNSimulatorDebug && target != nil { fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) } return target } -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. +// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the +// computed state and corresponding edge to the [DFA]. // -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. +// The func returns the computed target [DFA] state for the given input symbol t. +// If this does not lead to a valid [DFA] state, this method +// returns ATNSimulatorError. func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { reach := NewOrderedATNConfigSet() // if we don't find an existing DFA state // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + l.getReachableConfigSet(input, s.configs, reach, t) if len(reach.configs) == 0 { // we got nowhere on t from s if !reach.hasSemanticContext { // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. + // cause a fail-over from DFA later. l.addDFAEdge(s, t, ATNSimulatorError, nil) } // stop when we can't Match any more char return ATNSimulatorError } // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) + return l.addDFAEdge(s, t, nil, reach) } -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { if l.prevAccept.dfaState != nil { lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) @@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) } -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. -func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { +// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations +// we can reach upon input t. +// +// Parameter reach is a return parameter. +func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule + // than a runtimeConfig that already reached an accept state for the same rule SkipAlt := ATNInvalidAltNumber - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + for _, cfg := range closure.configs { + currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt + if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { continue } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) } for _, trans := range cfg.GetState().GetTransitions() { target := l.getReachableTarget(trans, t) if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + lexerActionExecutor := cfg.lexerActionExecutor if lexerActionExecutor != nil { lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + treatEOFAsEpsilon := t == TokenEOF + config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) if l.closure(input, config, reach, currentAltReachedAcceptState, true, treatEOFAsEpsilon) { // any remaining configs for l alt have a lower priority @@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC } func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Printf("ACTION %v\n", lexerActionExecutor) } // seek to after last char in token @@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState return nil } -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { configs := NewOrderedATNConfigSet() for i := 0; i < len(p.GetTransitions()); i++ { target := p.GetTransitions()[i].getTarget() @@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord return configs } -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept +// closure since the alternatives within any lexer decision are ordered by +// preference, this method stops pursuing the closure as soon as an accept // state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. +// search from runtimeConfig, all other (potentially reachable) states for +// this rule would have a lower priority. // -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. -func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, +// The func returns true if an accept state is reached. +func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") } _, ok := config.state.(*RuleStopState) if ok { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { if l.recog != nil { fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) } else { @@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co } // side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, + configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { - var cfg *LexerATNConfig + var cfg *ATNConfig if trans.getSerializationType() == TransitionRULE { @@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC pt := trans.(*PredicateTransition) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) } - configs.SetHasSemanticContext(true) + configs.hasSemanticContext = true if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } @@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC // TODO: if the entry rule is invoked recursively, some // actions may be executed during the recursive call. The // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be + // isEmpty() is false. In this case, the config needs to be // split into two contexts - one with just the empty path // and another with everything but the empty path. // Unfortunately, the current algorithm does not allow @@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC return cfg } -// Evaluate a predicate specified in the lexer. +// evaluatePredicate eEvaluates a predicate specified in the lexer. // -//If {@code speculative} is {@code true}, l method was called before -// {@link //consume} for the Matched character. This method should call -// {@link //consume} before evaluating the predicate to ensure position -// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, -// and {@link Lexer//getcolumn}, properly reflect the current -// lexer state. This method should restore {@code input} and the simulator -// to the original state before returning (i.e. undo the actions made by the -// call to {@link //consume}.
+// If speculative is true, this method was called before +// [consume] for the Matched character. This method should call +// [consume] before evaluating the predicate to ensure position +// sensitive values, including [GetText], [GetLine], +// and [GetColumn], properly reflect the current +// lexer state. This method should restore input and the simulator +// to the original state before returning, i.e. undo the actions made by the +// call to [Consume]. // -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / +// The func returns true if the specified predicate evaluates to true. func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { // assume true if no recognizer was provided if l.recog == nil { @@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream settings.dfaState = dfaState } -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { if to == nil && cfgs != nil { // leading to l call, ATNConfigSet.hasSemanticContext is used as a // marker indicating dynamic predicate evaluation makes l edge @@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // TJP notes: next time through the DFA, we see a pred again and eval. // If that gets us to a previously created (but dangling) DFA // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - + // + suppressEdge := cfgs.hasSemanticContext + cfgs.hasSemanticContext = false to = l.addDFAState(cfgs, true) if suppressEdge { @@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // Only track edges within the DFA bounds return to } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) } l.atn.edgeMu.Lock() @@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // configurations already. This method also detects the first // configuration containing an ATN rule stop state. Later, when // traversing the DFA, we will know which rule to accept. -func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { +func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { + var firstConfigWithRuleStopState *ATNConfig + for _, cfg := range configs.configs { _, ok := cfg.GetState().(*RuleStopState) if ok { @@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) } if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, present := dfa.states.Get(proposed) + existing, present := dfa.Get(proposed) if present { // This state was already present, so just return it. @@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) // We need to add the new state // - proposed.stateNumber = dfa.states.Len() - configs.SetReadOnly(true) + proposed.stateNumber = dfa.Len() + configs.readOnly = true + configs.configLookup = nil // Not needed now proposed.configs = configs - dfa.states.Put(proposed) + dfa.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) @@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA { return l.decisionToDFA[mode] } -// Get the text Matched so far for the current token. +// GetText returns the text [Match]ed so far for the current token. func (l *LexerATNSimulator) GetText(input CharStream) string { // index is first lookahead char, don't include. return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go new file mode 100644 index 000000000..4955ac876 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -0,0 +1,218 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +const ( + // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit + // a predicate during analysis if + // + // seeThruPreds==false + LL1AnalyzerHitPred = TokenInvalidType +) + +// * +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. +func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + + look[alt] = NewIntervalSet() + lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) + + // Wipe out lookahead for la alternative if we found nothing, + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +// Look computes the set of tokens that can follow s in the [ATN] in the +// specified ctx. +// +// If ctx is nil and the end of the rule containing +// s is reached, [EPSILON] is added to the result set. +// +// If ctx is not nil and the end of the outermost rule is +// reached, [EOF] is added to the result set. +// +// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a +// [BlockEndState] to detect epsilon paths through a closure. +// +// Parameter ctx is the complete parser context, or nil if the context +// should be ignored +// +// The func returns the set of tokens that can follow s in the [ATN] in the +// specified ctx. +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + var lookContext *PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), + NewBitSet(), true, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//If {@code ctx} is {@code nil} and {@code stopState} or the end of the +// rule containing {@code s} is reached, {@link Token//EPSILON} is added to +// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is +// {@code true} and {@code stopState} or the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.
+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSetIf the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.HasError() { + return nil + } + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// AddParseListener registers listener to receive events during the parsing process. +// +// To support output-preserving grammar transformations (including but not +// limited to left-recursion removal, automated left-factoring, and +// optimized code generation), calls to listener methods during the parse +// may differ substantially from calls made by +// [ParseTreeWalker.DEFAULT] used after the parse is complete. In +// particular, rule entry and exit events may occur in a different order +// during the parse than after the parser. In addition, calls to certain +// rule entry methods may be omitted. +// +// With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same. +// +// - Alterations to the grammar used to generate code may change the +// behavior of the listener calls. +// - Alterations to the command line options passed to ANTLR 4 when +// generating the parser may change the behavior of the listener calls. +// - Changing the version of the ANTLR Tool used to generate the parser +// may change the behavior of the listener calls. +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// RemoveParseListener removes listener from the list of parse listeners. +// +// If listener is nil or has not been added as a parse +// listener, this func does nothing. +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. +func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it +// lazily. +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO - Implement this? + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// SetTokenStream installs input as the token stream and resets the parser.
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// GetCurrentToken returns the current token at LT(1).
+//
+// [Match] needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have a new localctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// Get the precedence level for the top-most precedence rule.
+//
+// @return The precedence level for the top-most precedence rule, or -1 if
+// the parser context is not nested within a precedence rule.
+
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+//
+// Like {@link //EnterRule} but for recursive rules.
+
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ _, _ = p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+// IsExpectedToken checks whether symbol can follow the current state in the
+// {ATN}. The behavior of p.method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+// return getExpectedTokens().contains(symbol)
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// GetExpectedTokens and returns the set of input symbols which could follow the current parser
+// state and context, as given by [GetState] and [GetContext],
+// respectively.
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
+
+// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found.
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
+
+// GetDFAStrings returns a list of all DFA states used for debugging purposes
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// DumpDFA prints the whole of the DFA for debugging
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.Len() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// SetTrace installs a trace listener for the parse.
+//
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
new file mode 100644
index 000000000..ae2869692
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
@@ -0,0 +1,1668 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var ()
+
+// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
+// a standard JStore so that we can use Lazy instantiation of the JStore, mostly
+// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
+type ClosureBusy struct {
+ bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
+ desc string
+}
+
+// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
+func NewClosureBusy(desc string) *ClosureBusy {
+ return &ClosureBusy{
+ desc: desc,
+ }
+}
+
+func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
+ if c.bMap == nil {
+ c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
+ }
+ return c.bMap.Put(config)
+}
+
+type ParserATNSimulator struct {
+ BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *JPCMap
+ outerContext ParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := &ParserATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambig detection?//
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. [JPCMap]
+ // isn't Synchronized, but we're ok since two threads shouldn't reuse same
+ // parser/atn-simulator object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a) -> c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
+
+func (p *ParserATNSimulator) reset() {
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // whack cache after each prediction
+ // Do not attempt to run a GC now that we're done with the cache as makes the
+ // GC overhead terrible for badly formed grammars and has little effect on well formed
+ // grammars.
+ // I have made some extra effort to try and reduce memory pressure by reusing allocations when
+ // possible. However, it can only have a limited effect. The real solution is to encourage grammar
+ // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
+ // what is happening at runtime, along with using the error listener to report ambiguities.
+
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+ // If p is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt, re := p.execATN(dfa, s0, input, index, outerContext)
+ parser.SetError(re)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
+
+// execATN performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+//
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+//
+// - If the set is empty, there is no viable alternative for current symbol
+// - Does the state uniquely predict an alternative?
+// - Does the state have a conflict that would prevent us from
+// putting it on the work list?
+//
+// We also have some key operations to do:
+//
+// - Add an edge from previous DFA state to potentially NewDFA state, D,
+// - Upon current symbol but only if adding to work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// - Collecting predicates and adding semantic context to DFA accept states
+// - adding rule context to context-sensitive DFA accept states
+// - Consuming an input symbol
+// - Reporting a conflict
+// - Reporting an ambiguity
+// - Reporting a context sensitivity
+// - Reporting insufficient predicates
+//
+// Cover these cases:
+//
+// - dead end
+// - single alt
+// - single alt + predicates
+// - conflict
+// - conflict + predicates
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ p.parser.SetError(e)
+ return ATNInvalidAltNumber, e
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.conflictingAlts
+ if D.predicates != nil {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue(), nil
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt, re
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction, nil
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ case 1:
+ return alts.minValue(), nil
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue(), nil
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// p method returns {@code nil}.
+//
+// @param previousD The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for p edge is not
+// already cached
+
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param dfa The DFA
+// @param previousD The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, p method
+// returns {@link //ERROR}.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+ // create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.uniqueAlt = predictedAlt
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.requiresFullContext = true
+ // in SLL-only mode, we will stop at p state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.conflictingAlts.minValue())
+ }
+ if D.isAcceptState && D.configs.hasSemanticContext {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach *ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.uniqueAlt = p.getUniqueAlt(reach)
+ // unique prediction?
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ predictedAlt = reach.uniqueAlt
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+ // Just keeps scarfing until we know what the conflict is
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt, nil
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have non-conflicting alt subsets as in:
+ //
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+ //
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+ // In p case, (17,1,[5 $]) indicates there is some next sequence that
+ // would resolve p without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt, nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+ if p.mergeCache == nil {
+ p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ }
+ intermediate := NewATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+ // For full-context reach operations, separate handling is required to
+ // ensure that the alternative Matching the longest overall sequence is
+ // chosen when multiple such configurations can Match the input.
+
+ var skippedStopStates []*ATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.configs {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach *ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+ // contains all configurations relevant to the reach set, but p
+ // condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+ // Don't pursue the closure if there is just one state.
+ // It can only have one alternative just add to result
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewATNConfigSet(fullCtx)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+ // p case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+ // already guaranteed to meet this condition whether it's
+ // required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+ // chooses an alternative Matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
+ if len(reach.configs) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
+// configs which are in a [RuleStopState]. If all
+// configurations in configs are already in a rule stop state, this
+// method simply returns configs.
+//
+// When lookToEndOfRule is true, this method uses
+// [ATN].[NextTokens] for each configuration in configs which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// When lookToEndOfRule is true, this method checks for rule stop states
+// reachable by epsilon-only transitions from each configuration in
+// configs.
+//
+// The func returns configs if all configurations in configs are in a
+// rule stop state, otherwise it returns a new configuration set containing only
+// the configurations from configs which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewATNConfigSet(configs.fullCtx)
+ for _, config := range configs.configs {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewATNConfigSet(fullCtx)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewATNConfig6(target, i+1, initialContext)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+// applyPrecedenceFilter transforms the start state computed by
+// [computeStartState] to the special start state used by a
+// precedence [DFA] for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+// 1. Evaluate the precedence predicates for each configuration using
+// [SemanticContext].evalPrecedence.
+// 2. Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context.
+//
+// Transformation 2 is valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+// In the above grammar, the [ATN] state immediately before the token
+// reference 'a' in letterA is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// statement. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to prog, and then back in to statement
+// from being eliminated by the filter.
+//
+// The func returns the transformed configuration set representing the start state
+// for a precedence [DFA] at a particular precedence level (determined by
+// calling [Parser].getPrecedence).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]*PredictionContext)
+ configSet := NewATNConfigSet(configs.fullCtx)
+
+ for _, config := range configs.configs {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.configs {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+ // In the future, p elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.Equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.configs {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // unambiguous alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // un-predicated is indicated by SemanticContextNONE
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
+// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the
+// Error state was reached during [ATN] simulation.
+//
+// The default implementation of this method uses the following
+// algorithm to identify an [ATN] configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// [ParserRuleContext] returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+// - If a syntactically valid path or paths reach the end of the decision rule, and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if a semantically invalid but syntactically valid path exist
+// or paths exist, return the minimum associated alt.
+// - Otherwise, return [ATNInvalidAltNumber].
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a [FailedPredicateException] in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting
+// [FailedPredicateException] in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+// pass in the configs holding ATN configurations which were valid immediately before
+// the ERROR state was reached, outerContext as the initial parser context from the paper
+// or the parser stack at the instant before prediction commences.
+//
+// Teh func returns the value to return from [AdaptivePredict], or
+// [ATNInvalidAltNumber] if a suitable alternative was not
+// identified and [AdaptivePredict] should report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.configs) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.configs {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// Walk the list of configurations and split them according to
+// those that have preds evaluating to true/false. If no pred, assume
+// true pred and include in succeeded set. Returns Pair of sets.
+//
+// Create a NewSet so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point
+// prediction, which is where predicates need to evaluate.
+
+type ATNConfigSetPair struct {
+ item0, item1 *ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
+ succeeded := NewATNConfigSet(configs.fullCtx)
+ failed := NewATNConfigSet(configs.fullCtx)
+
+ for _, c := range configs.configs {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []*ATNConfigSet{succeeded, failed}
+}
+
+// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
+// un-predicated runtimeConfig which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
+
+func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ var stack []*ATNConfig
+ visited := make(map[*ATNConfig]bool)
+
+ stack = append(stack, config)
+
+ for len(stack) > 0 {
+ currConfig := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+
+ if _, ok := visited[currConfig]; ok {
+ continue
+ }
+ visited[currConfig] = true
+
+ if _, ok := currConfig.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !currConfig.GetContext().isEmpty() {
+ for i := 0; i < currConfig.GetContext().length(); i++ {
+ if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
+ newContext := currConfig.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
+ // While we have context to pop back from, we may have
+ // gotten that context AFTER having falling off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
+
+ stack = append(stack, c)
+ }
+ continue
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(currConfig, p.mergeCache)
+ continue
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ }
+ }
+
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+ // gotten that context AFTER having falling off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if c != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // target fell off end of rule mark resulting c as having dipped into outer context
+ // We can't get here if incoming config was rule stop and we had context
+ // track how far we dip into outer context. Might
+ // come in handy and we avoid evaluating context dependent
+ // preds if this is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+ configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
+ if !runtimeConfig.lRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW so we can't perform optimization
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+ // that p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', (' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+ // which points to p but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+ sb.WriteString("If {@code to} is {@code nil}, p method returns {@code nil}. +// Otherwise, p method returns the {@link DFAState} returned by calling +// {@link //addDFAState} for the {@code to} state.
+// +// @param dfa The DFA +// @param from The source state for the edge +// @param t The input symbol +// @param to The target state for the edge +// +// @return If {@code to} is {@code nil}, p method returns {@code nil} +// otherwise p method returns the result of calling {@link //addDFAState} +// on {@code to} +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) + } + if to == nil { + return nil + } + p.atn.stateMu.Lock() + to = p.addDFAState(dfa, to) // used existing if possible not incoming + p.atn.stateMu.Unlock() + if from == nil || t < -1 || t > p.atn.maxTokenType { + return to + } + p.atn.edgeMu.Lock() + if from.getEdges() == nil { + from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) + } + from.setIthEdge(t+1, to) // connect + p.atn.edgeMu.Unlock() + + if runtimeConfig.parserATNSimulatorDebug { + var names []string + if p.parser != nil { + names = p.parser.GetLiteralNames() + } + + fmt.Println("DFA=\n" + dfa.String(names, nil)) + } + return to +} + +// addDFAState adds state D to the [DFA] if it is not already present, and returns +// the actual instance stored in the [DFA]. If a state equivalent to D +// is already in the [DFA], the existing state is returned. Otherwise, this +// method returns D after adding it to the [DFA]. +// +// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and +// does not change the DFA. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { + if d == ATNSimulatorError { + return d + } + + existing, present := dfa.Get(d) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Print("addDFAState " + d.String() + " exists") + } + return existing + } + + // The state will be added if not already there or we will be given back the existing state struct + // if it is present. + // + d.stateNumber = dfa.Len() + if !d.configs.readOnly { + d.configs.OptimizeConfigs(&p.BaseATNSimulator) + d.configs.readOnly = true + d.configs.configLookup = nil + } + dfa.Put(d) + + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("addDFAState new " + d.String()) + } + + return d +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) + } +} + +// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route. +// +// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer +// so that they can see that this is happening and can take action if they want to. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go similarity index 77% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go rename to vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go index 1c8cee747..c249bc138 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go @@ -31,7 +31,9 @@ type ParserRuleContext interface { } type BaseParserRuleContext struct { - *BaseRuleContext + parentCtx RuleContext + invokingState int + RuleIndex int start, stop Token exception RecognitionException @@ -40,8 +42,22 @@ type BaseParserRuleContext struct { func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { prc := new(BaseParserRuleContext) + InitBaseParserRuleContext(prc, parent, invokingStateNumber) + return prc +} + +func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) { + // What context invoked b rule? + prc.parentCtx = parent - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. + if parent == nil { + prc.invokingState = -1 + } else { + prc.invokingState = invokingStateNumber + } prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, @@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) // The exception that forced prc rule to return. If the rule successfully // completed, prc is {@code nil}. prc.exception = nil - - return prc } func (prc *BaseParserRuleContext) SetException(e RecognitionException) { @@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string { return s } -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +// EnterRule is called when any rule is entered. +func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) { } -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +// ExitRule is called when any rule is exited. +func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) { } -// * Does not set parent link other add methods do that/// +// * Does not set parent link other add methods do that func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { if prc.children == nil { prc.children = make([]Tree, 0) @@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { return child } -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. -// / +// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as +// we entered a rule. If we have a label, we will need to remove +// the generic ruleContext object. func (prc *BaseParserRuleContext) RemoveLastChild() { if prc.children != nil && len(prc.children) > 0 { prc.children = prc.children[0 : len(prc.children)-1] @@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int { return len(prc.children) } -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { +func (prc *BaseParserRuleContext) GetSourceInterval() Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval } @@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } +func (prc *BaseParserRuleContext) SetParent(v Tree) { + if v == nil { + prc.parentCtx = nil + } else { + prc.parentCtx = v.(RuleContext) + } +} + +func (prc *BaseParserRuleContext) GetInvokingState() int { + return prc.invokingState +} + +func (prc *BaseParserRuleContext) SetInvokingState(t int) { + prc.invokingState = t +} + +func (prc *BaseParserRuleContext) GetRuleIndex() int { + return prc.RuleIndex +} + +func (prc *BaseParserRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (prc *BaseParserRuleContext) SetAltNumber(_ int) {} + +// IsEmpty returns true if the context of b is empty. +// +// A context is empty if there is no invoking state, meaning nobody calls +// current context. +func (prc *BaseParserRuleContext) IsEmpty() bool { + return prc.invokingState == -1 +} + +// GetParent returns the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +// +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of this +// method. +func (prc *BaseParserRuleContext) GetParent() Tree { + return prc.parentCtx +} + var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { @@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct { *BaseParserRuleContext } +//goland:noinspection GoUnusedExportedFunction func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { prc := new(BaseInterpreterRuleContext) diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go new file mode 100644 index 000000000..c1b80cc1f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -0,0 +1,727 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "golang.org/x/exp/slices" + "strconv" +) + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +const ( + // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $ + // doesn't mean wildcard: + // + // $ + x = [$,x] + // + // Here, + // + // $ = EmptyReturnState + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here +// +//goland:noinspection GoUnusedGlobalVariable +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +const ( + PredictionContextEmpty = iota + PredictionContextSingleton + PredictionContextArray +) + +// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to +// emulate inheritance from Java, and can be used without an interface definition. An interface +// is not required because no user code will ever need to implement this interface. +type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + nep := &PredictionContext{} + nep.cachedHash = calculateEmptyHash() + nep.pcType = PredictionContextEmpty + nep.returnState = BasePredictionContextEmptyReturnState + return nep +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{} + pc.pcType = PredictionContextSingleton + pc.returnState = returnState + pc.parentCtx = parent + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. + hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + nec := &PredictionContext{} + nec.cachedHash = hash + nec.pcType = PredictionContextArray + nec.parents = parents + nec.returnStates = returnStates + return nec +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + switch p.pcType { + case PredictionContextEmpty: + otherP := other.(*PredictionContext) + return other == nil || otherP == nil || otherP.isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other == nil || other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(p.returnStates, other.returnStates) && + slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { + return x.Equals(y) + }) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + if otherP == nil { + return false + } + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case PredictionContextEmpty: + return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func (p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as wildcard + if rootIsWildcard { + if a.isEmpty() { + return a + } + if b.isEmpty() { + return b + } + } + + // Convert either Singleton or Empty to arrays, so that we can merge them + // + ara := convertToArray(a) + arb := convertToArray(b) + return mergeArrays(ara, arb, rootIsWildcard, mergeCache) +} + +func convertToArray(pc *PredictionContext) *PredictionContext { + switch pc.Type() { + case PredictionContextEmpty: + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) + case PredictionContextSingleton: + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + default: + // Already an array + } + return pc +} + +// mergeSingletons merges two Singleton [PredictionContext] instances. +// +// Stack tops equal, parents merge is same return left graph. +// +// +//Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
These local-context merge operations are used when {@code rootIsWildcard} +// is true.
+// +//{@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
Special case of last merge if local context.
+//
These full-context merge operations are used when {@code rootIsWildcard} +// is false.
+// +// +// +//Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
Different tops, different parents.
+//
Shared top, same parents.
+//
Shared top, different parents.
+//
Shared top, all shared parents.
+//
Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.
+// an ATN configuration is valid. It's either a single predicate, +// a conjunction p1 && p2, or a sum of products p1 || p2. // - +// I have scoped the AND, OR, and Predicate subclasses of +// [SemanticContext] within the scope of this outer ``class'' type SemanticContext interface { Equals(other Collectable[SemanticContext]) bool Hash() int @@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { var SemanticContextNone = NewPredicate(-1, -1, false) -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { +func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { return p } @@ -198,7 +197,7 @@ type AND struct { func NewAND(a, b SemanticContext) *AND { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.Put(o) @@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) and := new(AND) and.opnds = opnds @@ -316,12 +313,12 @@ func (a *AND) Hash() int { return murmurFinish(h, len(a.opnds)) } -func (a *OR) Hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { +func (o *OR) Hash() int { + h := murmurInit(41) // Init with o value different from AND + for _, op := range o.opnds { h = murmurUpdate(h, op.Hash()) } - return murmurFinish(h, len(a.opnds)) + return murmurFinish(h, len(o.opnds)) } func (a *AND) String() string { @@ -349,7 +346,7 @@ type OR struct { func NewOR(a, b SemanticContext) *OR { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { operands.Put(o) @@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) o := new(OR) o.opnds = opnds diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go new file mode 100644 index 000000000..70c0673a0 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -0,0 +1,281 @@ +//go:build antlr.stats + +package antlr + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "sync" +) + +// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default +// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag. +// + +// Tells various components to collect statistics - because it is only true when this file is included, it will +// allow the compiler to completely eliminate all the code that is only used when collecting statistics. +const collectStats = true + +// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run. +// It is exported so that it can be used by others to look for things that are not already looked for in the +// runtime statistics. +type goRunStats struct { + + // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created + // during a run. It is exported so that it can be used by others to look for things that are not already looked for + // within this package. + // + jStats []*JStatRec + jStatsLock sync.RWMutex + topN int + topNByMax []*JStatRec + topNByUsed []*JStatRec + unusedCollections map[CollectionSource]int + counts map[CollectionSource]int +} + +const ( + collectionsFile = "collections" +) + +var ( + Statistics = &goRunStats{ + topN: 10, + } +) + +type statsOption func(*goRunStats) error + +// Configure allows the statistics system to be configured as the user wants and override the defaults +func (s *goRunStats) Configure(options ...statsOption) error { + for _, option := range options { + err := option(s) + if err != nil { + return err + } + } + return nil +} + +// WithTopN sets the number of things to list in the report when we are concerned with the top N things. +// +// For example, if you want to see the top 20 collections by size, you can do: +// +// antlr.Statistics.Configure(antlr.WithTopN(20)) +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + s.topN = topN + return nil + } +} + +// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user. +// +// The function gathers and analyzes a number of statistics about any particular run of +// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only +// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be +// especially useful in tracking down bugs or performance problems when an ANTLR user could +// supply the output from this package, but cannot supply the grammar file(s) they are using, even +// privately to the maintainers. +// +// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user +// must call this function their selves to analyze the statistics. This is because none of the infrastructure is +// extant unless the calling program is built with the antlr.stats tag like so: +// +// go build -tags antlr.stats . +// +// When a program is built with the antlr.stats tag, the Statistics object is created and available outside +// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the +// [Statistics.Report] function to report the statistics. +// +// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to +// me [Jim Idle] directly at jimi@idle.ws +// +// [Jim Idle]: https:://github.com/jim-idle +func (s *goRunStats) Analyze() { + + // Look for anything that looks strange and record it in our local maps etc for the report to present it + // + s.CollectionAnomalies() + s.TopNCollections() +} + +// TopNCollections looks through all the statistical records and gathers the top ten collections by size. +func (s *goRunStats) TopNCollections() { + + // Let's sort the stat records by MaxSize + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].MaxSize > s.jStats[j].MaxSize + }) + + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByMax = append(s.topNByMax, s.jStats[i]) + } + + // Sort by the number of times used + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts + }) + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByUsed = append(s.topNByUsed, s.jStats[i]) + } +} + +// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output +// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be +// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix. +func (s *goRunStats) Report(dir string, prefix string) error { + + isDir, err := isDirectory(dir) + switch { + case err != nil: + return err + case !isDir: + return fmt.Errorf("output directory `%s` is not a directory", dir) + } + s.reportCollections(dir, prefix) + + // Clean out any old data in case the user forgets + // + s.Reset() + return nil +} + +func (s *goRunStats) Reset() { + s.jStats = nil + s.topNByUsed = nil + s.topNByMax = nil +} + +func (s *goRunStats) reportCollections(dir, prefix string) { + cname := filepath.Join(dir, ".asciidoctor") + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + _, _ = f.WriteString(`// .asciidoctorconfig +++++ + +++++`) + _ = f.Close() + + fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc") + // If the file doesn't exist, create it, or append to the file + f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + defer func(f *os.File) { + err := f.Close() + if err != nil { + log.Fatal(err) + } + }(f) + _, _ = f.WriteString("= Collections for " + prefix + "\n\n") + + _, _ = f.WriteString("== Summary\n") + + if s.unusedCollections != nil { + _, _ = f.WriteString("=== Unused Collections\n") + _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n") + _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n") + _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n") + _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n") + _, _ = f.WriteString(" actually needed.\n\n") + + _, _ = f.WriteString("\n.Unused collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + + for k, v := range s.unusedCollections { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + f.WriteString("|===\n\n") + } + + _, _ = f.WriteString("\n.Summary of Collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + for k, v := range s.counts { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n") + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n") + for _, c := range s.topNByMax { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n") + for _, c := range s.topNByUsed { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") +} + +// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled. +func (s *goRunStats) AddJStatRec(rec *JStatRec) { + s.jStatsLock.Lock() + defer s.jStatsLock.Unlock() + s.jStats = append(s.jStats, rec) +} + +// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found. +func (s *goRunStats) CollectionAnomalies() { + s.jStatsLock.RLock() + defer s.jStatsLock.RUnlock() + s.counts = make(map[CollectionSource]int, len(s.jStats)) + for _, c := range s.jStats { + + // Accumlate raw counts + // + s.counts[c.Source]++ + + // Look for allocated but unused collections and count them + if c.MaxSize == 0 && c.Puts == 0 { + if s.unusedCollections == nil { + s.unusedCollections = make(map[CollectionSource]int) + } + s.unusedCollections[c.Source]++ + } + if c.MaxSize > 6000 { + fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar") + } + } + +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go new file mode 100644 index 000000000..4d9eb94e5 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go @@ -0,0 +1,23 @@ +package antlr + +// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be +// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies +// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function +// for ideas on what can be gleaned from these statistics about collections. +type JStatRec struct { + Source CollectionSource + MaxSize int + CurSize int + Gets int + GetHits int + GetMisses int + GetHashConflicts int + GetNoEnt int + Puts int + PutHits int + PutMisses int + PutHashConflicts int + MaxSlotSize int + Description string + CreateStack []byte +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go new file mode 100644 index 000000000..9670efb82 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -0,0 +1,213 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type TokenSourceCharStreamPair struct { + tokenSource TokenSource + charStream CharStream +} + +// A token has properties: text, type, line, character position in the line +// (so we can ignore tabs), token channel, index, and source from which +// we obtained this token. + +type Token interface { + GetSource() *TokenSourceCharStreamPair + GetTokenType() int + GetChannel() int + GetStart() int + GetStop() int + GetLine() int + GetColumn() int + + GetText() string + SetText(s string) + + GetTokenIndex() int + SetTokenIndex(v int) + + GetTokenSource() TokenSource + GetInputStream() CharStream + + String() string +} + +type BaseToken struct { + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + text string // text of the token. + readOnly bool +} + +const ( + TokenInvalidType = 0 + + // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state + // and did not follow it despite needing to. + TokenEpsilon = -2 + + TokenMinUserTokenType = 1 + + TokenEOF = -1 + + // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. + // + // All tokens go to the parser (unless [Skip] is called in the lexer rule) + // on a particular "channel". The parser tunes to a particular channel + // so that whitespace etc... can go to the parser on a "hidden" channel. + TokenDefaultChannel = 0 + + // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel]. + // + // Anything on a different channel than TokenDefaultChannel is not parsed by parser. + TokenHiddenChannel = 1 +) + +func (b *BaseToken) GetChannel() int { + return b.channel +} + +func (b *BaseToken) GetStart() int { + return b.start +} + +func (b *BaseToken) GetStop() int { + return b.stop +} + +func (b *BaseToken) GetLine() int { + return b.line +} + +func (b *BaseToken) GetColumn() int { + return b.column +} + +func (b *BaseToken) GetTokenType() int { + return b.tokenType +} + +func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { + return b.source +} + +func (b *BaseToken) GetTokenIndex() int { + return b.tokenIndex +} + +func (b *BaseToken) SetTokenIndex(v int) { + b.tokenIndex = v +} + +func (b *BaseToken) GetTokenSource() TokenSource { + return b.source.tokenSource +} + +func (b *BaseToken) GetInputStream() CharStream { + return b.source.charStream +} + +type CommonToken struct { + BaseToken +} + +func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { + + t := &CommonToken{ + BaseToken: BaseToken{ + source: source, + tokenType: tokenType, + channel: channel, + start: start, + stop: stop, + tokenIndex: -1, + }, + } + + if t.source.tokenSource != nil { + t.line = source.tokenSource.GetLine() + t.column = source.tokenSource.GetCharPositionInLine() + } else { + t.column = -1 + } + return t +} + +// An empty {@link Pair} which is used as the default value of +// {@link //source} for tokens that do not have a source. + +//CommonToken.EMPTY_SOURCE = [ nil, nil ] + +// Constructs a New{@link CommonToken} as a copy of another {@link Token}. +// +//+// If {@code oldToken} is also a {@link CommonToken} instance, the newly +// constructed token will share a reference to the {@link //text} field and +// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will +// be assigned the result of calling {@link //GetText}, and {@link //source} +// will be constructed from the result of {@link Token//GetTokenSource} and +// {@link Token//GetInputStream}.
+// +// @param oldToken The token to copy. +func (c *CommonToken) clone() *CommonToken { + t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) + t.tokenIndex = c.GetTokenIndex() + t.line = c.GetLine() + t.column = c.GetColumn() + t.text = c.GetText() + return t +} + +func (c *CommonToken) GetText() string { + if c.text != "" { + return c.text + } + input := c.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if c.start < n && c.stop < n { + return input.GetTextFromInterval(NewInterval(c.start, c.stop)) + } + return "+// You can insert stuff, replace, and delete chunks. Note that the operations +// are done lazily--only if you convert the buffer to a {@link String} with +// {@link TokenStream#getText()}. This is very efficient because you are not +// moving data around all the time. As the buffer of tokens is converted to +// strings, the {@link #getText()} method(s) scan the input token stream and +// check to see if there is an operation at the current index. If so, the +// operation is done and then normal {@link String} rendering continues on the +// buffer. This is like having multiple Turing machine instruction streams +// (programs) operating on a single input tape. :)
+//+ +// This rewriter makes no modifications to the token stream. It does not ask the +// stream to fill itself up nor does it advance the input cursor. The token +// stream {@link TokenStream#index()} will return the same value before and +// after any {@link #getText()} call.
+ +//+// The rewriter only works on tokens that you have in the buffer and ignores the +// current input cursor. If you are buffering tokens on-demand, calling +// {@link #getText()} halfway through the input will only do rewrites for those +// tokens in the first half of the file.
+ +//+// Since the operations are done lazily at {@link #getText}-time, operations do +// not screw up the token index values. That is, an insert operation at token +// index {@code i} does not change the index values for tokens +// {@code i}+1..n-1.
+ +//+// Because operations never actually alter the buffer, you may always get the +// original token stream back without undoing anything. Since the instructions +// are queued up, you can easily simulate transactions and roll back any changes +// if there is an error just by removing instructions. For example,
+ +//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+// +// Then in the rules, you can execute (assuming rewriter is visible):
+ +//+// Token t,u; +// ... +// rewriter.insertAfter(t, "text to put after t");} +// rewriter.insertAfter(u, "text after u");} +// System.out.println(rewriter.getText()); +//+ +//
+// You can also have multiple "instruction streams" and get multiple rewrites +// from a single pass over the input. Just name the instruction streams and use +// that name again when printing the buffer. This could be useful for generating +// a C file and also its header file--all from the same buffer:
+ +//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+// +// If you don't use named rewrite streams, a "default" stream is used as the +// first example shows.
+ +const ( + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instructionIndex int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + opName string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instructionIndex +} + +func (op *BaseRewriteOperation) GetIndex() int { + return op.index +} + +func (op *BaseRewriteOperation) GetText() string { + return op.text +} + +func (op *BaseRewriteOperation) GetOpName() string { + return op.opName +} + +func (op *BaseRewriteOperation) GetTokens() TokenStream { + return op.tokens +} + +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instructionIndex = val +} + +func (op *BaseRewriteOperation) SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation) SetText(val string) { + op.text = val +} + +func (op *BaseRewriteOperation) SetOpName(val string) { + op.opName = val +} + +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { + op.tokens = val +} + +func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int { + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.opName, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions +// first and then the "insert before" instructions at same index. Implementation +// of "insert after" is "insert before index+1". +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }, + } +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp +// instructions. +type ReplaceOp struct { + BaseRewriteOperation + LastIndex int +} + +func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { + return &ReplaceOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: from, + text: text, + opName: "ReplaceOp", + tokens: stream, + }, + LastIndex: to, + } +} + +func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { + if op.text != "" { + buffer.WriteString(op.text) + } + return op.LastIndex + 1 +} + +func (op *ReplaceOp) String() string { + if op.text == "" { + return fmt.Sprintf("This is a one way link. It emanates from a state (usually via a list of +// transitions) and has a target state.
+// +//Since we never have to change the ATN transitions once we construct it, +// the states. We'll use the term Edge for the DFA to distinguish them from +// ATN transitions.
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(_, _, _ int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +//goland:noinspection GoUnusedGlobalVariable +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// AtomTransition +// TODO: make all transitions sets? no, should remove set edges +type AtomTransition struct { + BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + t := &AtomTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionATOM, + label: intervalSet, + isEpsilon: false, + }, + } + t.intervalSet = t.makeLabel() + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, _, _ int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + BaseTransition + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + return &RuleTransition{ + BaseTransition: BaseTransition{ + target: ruleStart, + isEpsilon: true, + serializationType: TransitionRULE, + }, + ruleIndex: ruleIndex, + precedence: precedence, + followState: followState, + } +} + +func (t *RuleTransition) Matches(_, _, _ int) bool { + return false +} + +type EpsilonTransition struct { + BaseTransition + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + return &EpsilonTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionEPSILON, + isEpsilon: true, + }, + outermostPrecedenceReturn: outermostPrecedenceReturn, + } +} + +func (t *EpsilonTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + BaseTransition + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + t := &RangeTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionRANGE, + isEpsilon: false, + }, + start: start, + stop: stop, + } + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, _, _ int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + var sb strings.Builder + sb.WriteByte('\'') + sb.WriteRune(rune(t.start)) + sb.WriteString("'..'") + sb.WriteRune(rune(t.stop)) + sb.WriteByte('\'') + return sb.String() +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + return &BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + }, + } +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + BaseAbstractPredicateTransition + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + return &PredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPREDICATE, + isEpsilon: true, + }, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + predIndex: predIndex, + } +} + +func (t *PredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string { + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + BaseTransition + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + return &ActionTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionACTION, + isEpsilon: true, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + actionIndex: actionIndex, + } +} + +func (t *ActionTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + t := &SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionSET, + }, + } + + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + return t +} + +func (t *SetTransition) Matches(symbol, _, _ int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + t := &NotSetTransition{ + SetTransition: SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionNOTSET, + }, + }, + } + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + return &WildcardTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionWILDCARD, + }, + } +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." +} + +type PrecedencePredicateTransition struct { + BaseAbstractPredicateTransition + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + return &PrecedencePredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPRECEDENCE, + isEpsilon: true, + }, + }, + precedence: precedence, + } +} + +func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go new file mode 100644 index 000000000..c288420fb --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go @@ -0,0 +1,304 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + GetSourceInterval() Interval +} + +type ParseTree interface { + SyntaxTree + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + GetRuleContext() RuleContext +} + +type TerminalNode interface { + ParseTree + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil } + +// TODO: Implement this? +//func (this ParseTreeVisitor) Visit(ctx) { +// if (Utils.isArray(ctx)) { +// self := this +// return ctx.map(function(child) { return VisitAtom(self, child)}) +// } else { +// return VisitAtom(this, ctx) +// } +//} +// +//func VisitAtom(Visitor, ctx) { +// if (ctx.parser == nil) { //is terminal +// return +// } +// +// name := ctx.parser.ruleNames[ctx.ruleIndex] +// funcName := "Visit" + Utils.titleCase(name) +// +// return Visitor[funcName](ctx) +//} + +type ParseTreeListener interface { + VisitTerminal(node TerminalNode) + VisitErrorNode(node ErrorNode) + EnterEveryRule(ctx ParserRuleContext) + ExitEveryRule(ctx ParserRuleContext) +} + +type BaseParseTreeListener struct{} + +var _ ParseTreeListener = &BaseParseTreeListener{} + +func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {} +func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {} +func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {} +func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {} + +type TerminalNodeImpl struct { + parentCtx RuleContext + symbol Token +} + +var _ TerminalNode = &TerminalNodeImpl{} + +func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { + tn := new(TerminalNodeImpl) + + tn.parentCtx = nil + tn.symbol = symbol + + return tn +} + +func (t *TerminalNodeImpl) GetChild(_ int) Tree { + return nil +} + +func (t *TerminalNodeImpl) GetChildren() []Tree { + return nil +} + +func (t *TerminalNodeImpl) SetChildren(_ []Tree) { + panic("Cannot set children on terminal node") +} + +func (t *TerminalNodeImpl) GetSymbol() Token { + return t.symbol +} + +func (t *TerminalNodeImpl) GetParent() Tree { + return t.parentCtx +} + +func (t *TerminalNodeImpl) SetParent(tree Tree) { + t.parentCtx = tree.(RuleContext) +} + +func (t *TerminalNodeImpl) GetPayload() interface{} { + return t.symbol +} + +func (t *TerminalNodeImpl) GetSourceInterval() Interval { + if t.symbol == nil { + return TreeInvalidInterval + } + tokenIndex := t.symbol.GetTokenIndex() + return NewInterval(tokenIndex, tokenIndex) +} + +func (t *TerminalNodeImpl) GetChildCount() int { + return 0 +} + +func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitTerminal(t) +} + +func (t *TerminalNodeImpl) GetText() string { + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) String() string { + if t.symbol.GetTokenType() == TokenEOF { + return "