From 4b52f1e8c8514e6fb1108c1c4e28d3f4f017be74 Mon Sep 17 00:00:00 2001
From: Alberto Gutierrez
Date: Mon, 1 Dec 2025 09:39:52 +0100
Subject: [PATCH 1/2] Kiali with Gevals

Signed-off-by: Alberto Gutierrez
---
 .github/workflows/gevals.yaml    |  13 +
 build/gevals.mk                  |   6 +-
 build/kiali.mk                   |  48 +++
 dev/config/istio/bookinfo.yaml   | 335 ++++++++++++++++++
 dev/config/istio/kiali.yaml      | 456 +++++++++++++++++++++++++
 dev/config/istio/prometheus.yaml | 559 +++++++++++++++++++++++++++++++
 6 files changed, 1416 insertions(+), 1 deletion(-)
 create mode 100644 build/kiali.mk
 create mode 100644 dev/config/istio/bookinfo.yaml
 create mode 100644 dev/config/istio/kiali.yaml
 create mode 100644 dev/config/istio/prometheus.yaml

diff --git a/.github/workflows/gevals.yaml b/.github/workflows/gevals.yaml
index 532a7829..a5d03ff9 100644
--- a/.github/workflows/gevals.yaml
+++ b/.github/workflows/gevals.yaml
@@ -53,6 +53,7 @@ jobs:
         contains(github.event.comment.body, '/run-gevals'))
     outputs:
       should-run: ${{ steps.check.outputs.should-run }}
+      kiali-run: ${{ steps.check.outputs.kiali-run }}
       pr-number: ${{ steps.check.outputs.pr-number }}
       pr-ref: ${{ steps.check.outputs.pr-ref }}
     steps:
@@ -77,6 +78,12 @@
            echo "should-run=true" >> $GITHUB_OUTPUT
            echo "pr-ref=${{ github.ref }}" >> $GITHUB_OUTPUT
          fi
+          TASK_FILTER="${{ github.event.inputs.task-filter || '' }}"
+          if [[ "$TASK_FILTER" =~ kiali ]]; then
+            echo "kiali-run=true" >> $GITHUB_OUTPUT
+          else
+            echo "kiali-run=false" >> $GITHUB_OUTPUT
+          fi
 
   # Run gevals evaluation with Kind cluster
   run-evaluation:
@@ -98,8 +105,14 @@
       - name: Setup Kind cluster
         run: make kind-create-cluster KIND_CLUSTER_NAME=${{ env.KIND_CLUSTER_NAME }}
 
+      - name: Install Istio/Kiali and bookinfo demo
+        if: needs.check-trigger.outputs.kiali-run == 'true'
+        run: make setup-kiali
+
       - name: Start MCP server
         run: make run-server
+        env:
+          TOOLSETS: ${{ needs.check-trigger.outputs.kiali-run == 'true' && 'kiali' || '' }}
 
       - name: Run gevals evaluation
         id: gevals
diff --git a/build/gevals.mk b/build/gevals.mk
index f36ec15c..efc14b29 100644
--- a/build/gevals.mk
+++ b/build/gevals.mk
@@ -7,7 +7,11 @@ MCP_HEALTH_INTERVAL ?= 2
 .PHONY: run-server
 run-server: build ## Start MCP server in background and wait for health check
 	@echo "Starting MCP server on port $(MCP_PORT)..."
-	@./$(BINARY_NAME) --port $(MCP_PORT) & echo $$! > .mcp-server.pid
+	@if [ -n "$(TOOLSETS)" ]; then \
+		./$(BINARY_NAME) --port $(MCP_PORT) --toolsets $(TOOLSETS) & echo $$! > .mcp-server.pid; \
+	else \
+		./$(BINARY_NAME) --port $(MCP_PORT) & echo $$! > .mcp-server.pid; \
+	fi
 	@echo "MCP server started with PID $$(cat .mcp-server.pid)"
 	@echo "Waiting for MCP server to be ready..."
 	@elapsed=0; \
diff --git a/build/kiali.mk b/build/kiali.mk
new file mode 100644
index 00000000..71ceef7d
--- /dev/null
+++ b/build/kiali.mk
@@ -0,0 +1,48 @@
+# Kind cluster management
+
+KIND_CLUSTER_NAME ?= kubernetes-mcp-server
+
+# Detect container engine (docker or podman)
+CONTAINER_ENGINE ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null)
+
+##@ Istio
+
+ISTIOCTL = _output/bin/istioctl
+
+$(ISTIOCTL):
+	@mkdir -p _output/bin
+	@echo "Downloading istioctl..."
+ @set -e; \ + TMPDIR=$$(mktemp -d); \ + cd $$TMPDIR; \ + curl -sL https://istio.io/downloadIstio | sh -; \ + ISTIODIR=$$(ls -d istio-* | head -n1); \ + cp $$ISTIODIR/bin/istioctl $(PWD)/$(ISTIOCTL); \ + cd - >/dev/null; \ + rm -rf $$TMPDIR; \ + echo "istioctl installed at $(ISTIOCTL)" + +.PHONY: istioctl +istioctl: $(ISTIOCTL) ## Ensure istioctl is installed to _output/bin/ + +.PHONY: install-istio +install-istio: istioctl ## Install Istio (demo profile) and enable sidecar injection in default ns + ./$(ISTIOCTL) install --set profile=demo -y + kubectl label namespace default istio-injection=enabled --overwrite + +.PHONY: install-istio-addons +install-istio-addons: install-istio ## Install Istio addons + kubectl apply -f dev/config/istio/prometheus.yaml -n istio-system + kubectl apply -f dev/config/istio/kiali.yaml -n istio-system + kubectl wait --namespace istio-system --for=condition=available deployment/kiali --timeout=300s + kubectl wait --namespace istio-system --for=condition=available deployment/prometheus --timeout=300s + +.PHONY: install-bookinfo-demo +install-bookinfo-demo: ## Install Bookinfo demo + kubectl create ns bookinfo + kubectl label namespace bookinfo istio-discovery=enabled istio.io/rev=default istio-injection=enabled + kubectl apply -f dev/config/istio/bookinfo.yaml -n bookinfo + kubectl wait --for=condition=Ready pod --all -n bookinfo --timeout=300s + +.PHONY: setup-kiali +setup-kiali: install-istio-addons install-bookinfo-demo ## Setup Kiali \ No newline at end of file diff --git a/dev/config/istio/bookinfo.yaml b/dev/config/istio/bookinfo.yaml new file mode 100644 index 00000000..b878ed5d --- /dev/null +++ b/dev/config/istio/bookinfo.yaml @@ -0,0 +1,335 @@ +# Copyright Istio Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################################################## +# This file defines the services, service accounts, and deployments for the Bookinfo sample. 
+# +# To apply all 4 Bookinfo services, their corresponding service accounts, and deployments: +# +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml +# +# Alternatively, you can deploy any resource separately: +# +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l service=reviews # reviews Service +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l account=reviews # reviews ServiceAccount +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l app=reviews,version=v3 # reviews-v3 Deployment +################################################################################################## + +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details + labels: + account: details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.20.3 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings + labels: + account: ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.20.3 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Reviews service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews + labels: + account: reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v1 + labels: + app: reviews + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v1 + template: + metadata: + labels: + app: reviews + version: v1 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v1:1.20.3 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + 
ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v2 + labels: + app: reviews + version: v2 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v2 + template: + metadata: + labels: + app: reviews + version: v2 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v2:1.20.3 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.20.3 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +################################################################################################## +# Productpage services +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage + labels: + account: productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9080" + prometheus.io/path: "/metrics" + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.20.3 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + volumes: + - name: tmp + emptyDir: {} +--- \ No newline at end of file diff --git a/dev/config/istio/kiali.yaml b/dev/config/istio/kiali.yaml new file mode 100644 index 00000000..3b8e35a9 --- /dev/null +++ b/dev/config/istio/kiali.yaml @@ -0,0 +1,456 @@ +--- +# Source: kiali-server/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kiali + namespace: "istio-system" + labels: + + helm.sh/chart: kiali-server-2.17.0 + app: kiali + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + version: "v2.17.0" + app.kubernetes.io/version: "v2.17.0" + app.kubernetes.io/part-of: "kiali" +... 
+--- +# Source: kiali-server/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kiali + namespace: "istio-system" + labels: + + helm.sh/chart: kiali-server-2.17.0 + app: kiali + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + version: "v2.17.0" + app.kubernetes.io/version: "v2.17.0" + app.kubernetes.io/part-of: "kiali" +data: + config.yaml: | + additional_display_details: + - annotation: kiali.io/api-spec + icon_annotation: kiali.io/api-type + title: API Documentation + auth: + openid: {} + openshift: + client_id_prefix: kiali + strategy: anonymous + clustering: + autodetect_secrets: + enabled: true + label: kiali.io/multiCluster=true + clusters: [] + deployment: + additional_service_yaml: {} + affinity: + node: {} + pod: {} + pod_anti: {} + cluster_wide_access: true + configmap_annotations: {} + custom_envs: [] + custom_secrets: [] + dns: + config: {} + policy: "" + extra_labels: {} + host_aliases: [] + hpa: + api_version: autoscaling/v2 + spec: {} + image_digest: "" + image_name: quay.io/kiali/kiali + image_pull_policy: IfNotPresent + image_pull_secrets: [] + image_version: v2.17 + ingress: + additional_labels: {} + class_name: nginx + override_yaml: + metadata: {} + ingress_enabled: false + instance_name: kiali + logger: + log_format: text + log_level: info + sampler_rate: "1" + time_field_format: 2006-01-02T15:04:05Z07:00 + namespace: istio-system + network_policy: + enabled: false + node_selector: {} + pod_annotations: + proxy.istio.io/config: '{ "holdApplicationUntilProxyStarts": true }' + pod_labels: + sidecar.istio.io/inject: "false" + priority_class_name: "" + probes: + liveness: + initial_delay_seconds: 5 + period_seconds: 30 + readiness: + initial_delay_seconds: 5 + period_seconds: 30 + startup: + failure_threshold: 6 + initial_delay_seconds: 30 + period_seconds: 10 + remote_cluster_resources_only: false + replicas: 1 + resources: + limits: + memory: 1Gi + requests: + cpu: 10m + memory: 64Mi + secret_name: kiali + security_context: {} + service_annotations: {} + service_type: "" + tolerations: [] + topology_spread_constraints: [] + version_label: v2.17.0 + view_only_mode: false + external_services: + custom_dashboards: + enabled: true + istio: + root_namespace: istio-system + tracing: + enabled: false + identity: + cert_file: "" + private_key_file: "" + kiali_feature_flags: + custom_workload_types: [] + disabled_features: [] + validations: + ignore: + - KIA1301 + login_token: + signing_key: CHANGEME00000000 + server: + observability: + metrics: + enabled: true + port: 9090 + port: 20001 + web_root: /kiali + skipResources: [] +... 
+--- +# Source: kiali-server/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kiali + labels: + + helm.sh/chart: kiali-server-2.17.0 + app: kiali + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + version: "v2.17.0" + app.kubernetes.io/version: "v2.17.0" + app.kubernetes.io/part-of: "kiali" +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - pods/log + verbs: + - get + - list + - watch +- apiGroups: [""] + resources: + - namespaces + - pods + - replicationcontrollers + - services + verbs: + - get + - list + - watch + - patch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - patch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch + - patch +- apiGroups: + - networking.istio.io + - security.istio.io + - extensions.istio.io + - telemetry.istio.io + - gateway.networking.k8s.io + - inference.networking.k8s.io + resources: ["*"] + verbs: + - get + - list + - watch + - create + - delete + - patch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - watch + - patch +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +- apiGroups: ["oauth.openshift.io"] + resources: + - oauthclients + resourceNames: + - kiali-istio-system + verbs: + - get +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: + - get + - list + - watch +... +--- +# Source: kiali-server/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kiali + labels: + + helm.sh/chart: kiali-server-2.17.0 + app: kiali + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + version: "v2.17.0" + app.kubernetes.io/version: "v2.17.0" + app.kubernetes.io/part-of: "kiali" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kiali +subjects: +- kind: ServiceAccount + name: kiali + namespace: "istio-system" +... +--- +# Source: kiali-server/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kiali + namespace: "istio-system" + labels: + + helm.sh/chart: kiali-server-2.17.0 + app: kiali + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + version: "v2.17.0" + app.kubernetes.io/version: "v2.17.0" + app.kubernetes.io/part-of: "kiali" + annotations: +spec: + ports: + - name: http + appProtocol: http + protocol: TCP + port: 20001 + - name: http-metrics + appProtocol: http + protocol: TCP + port: 9090 + selector: + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali +... 
+--- +# Source: kiali-server/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kiali + namespace: "istio-system" + labels: + + helm.sh/chart: kiali-server-2.17.0 + app: kiali + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + version: "v2.17.0" + app.kubernetes.io/version: "v2.17.0" + app.kubernetes.io/part-of: "kiali" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + name: kiali + labels: + + helm.sh/chart: kiali-server-2.17.0 + app: kiali + app.kubernetes.io/name: kiali + app.kubernetes.io/instance: kiali + version: "v2.17.0" + app.kubernetes.io/version: "v2.17.0" + app.kubernetes.io/part-of: "kiali" + sidecar.istio.io/inject: "false" + annotations: + checksum/config: 5129658cb79b9fbbb9b7745ea8ff77c611538e6ce760b6ad5554d7c85a6b79c1 + prometheus.io/scrape: "true" + prometheus.io/port: "9090" + kiali.io/dashboards: go,kiali + proxy.istio.io/config: '{ "holdApplicationUntilProxyStarts": true }' + spec: + serviceAccountName: kiali + containers: + - image: "quay.io/kiali/kiali:v2.17" + imagePullPolicy: IfNotPresent + name: kiali + command: + - "/opt/kiali/kiali" + - "-config" + - "/kiali-configuration/config.yaml" + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL + ports: + - name: api-port + containerPort: 20001 + - name: http-metrics + containerPort: 9090 + readinessProbe: + httpGet: + path: /kiali/healthz + port: api-port + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 30 + livenessProbe: + httpGet: + path: /kiali/healthz + port: api-port + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 30 + startupProbe: + httpGet: + path: /kiali/healthz + port: api-port + scheme: HTTP + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + env: + - name: ACTIVE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "info" + - name: LOG_FORMAT + value: "text" + - name: LOG_TIME_FIELD_FORMAT + value: "2006-01-02T15:04:05Z07:00" + - name: LOG_SAMPLER_RATE + value: "1" + volumeMounts: + - name: kiali-configuration + mountPath: "/kiali-configuration" + - name: kiali-cert + mountPath: "/kiali-cert" + - name: kiali-secret + mountPath: "/kiali-secret" + - name: kiali-cabundle + mountPath: "/kiali-cabundle" + - name: "kiali-multi-cluster-secret" + mountPath: "/kiali-remote-cluster-secrets/kiali-multi-cluster-secret" + readOnly: true + resources: + limits: + memory: 1Gi + requests: + cpu: 10m + memory: 64Mi + volumes: + - name: kiali-configuration + configMap: + name: kiali + - name: kiali-cert + secret: + secretName: istio.kiali-service-account + optional: true + - name: kiali-secret + secret: + secretName: kiali + optional: true + - name: kiali-cabundle + configMap: + name: kiali-cabundle + optional: true + - name: "kiali-multi-cluster-secret" + secret: + secretName: "kiali-multi-cluster-secret" + optional: true +... 
\ No newline at end of file diff --git a/dev/config/istio/prometheus.yaml b/dev/config/istio/prometheus.yaml new file mode 100644 index 00000000..ebfe11a5 --- /dev/null +++ b/dev/config/istio/prometheus.yaml @@ -0,0 +1,559 @@ +--- +# Source: prometheus/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/version: v3.5.0 + helm.sh/chart: prometheus-27.37.0 + app.kubernetes.io/part-of: prometheus + name: prometheus + namespace: istio-system + annotations: + {} +--- +# Source: prometheus/templates/cm.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/version: v3.5.0 + helm.sh/chart: prometheus-27.37.0 + app.kubernetes.io/part-of: prometheus + name: prometheus + namespace: istio-system +data: + allow-snippet-annotations: "false" + alerting_rules.yml: | + {} + alerts: | + {} + prometheus.yml: | + global: + evaluation_interval: 1m + scrape_interval: 15s + scrape_timeout: 10s + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + - /etc/config/rules + - /etc/config/alerts + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-apiservers + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: keep + regex: default;kubernetes;https + source_labels: + - __meta_kubernetes_namespace + - __meta_kubernetes_service_name + - __meta_kubernetes_endpoint_port_name + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - honor_labels: true + job_name: kubernetes-service-endpoints + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_scrape + - action: drop + regex: true + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_scrape_slow + - action: replace + regex: (https?) 
+ source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_scheme + target_label: __scheme__ + - action: replace + regex: (.+) + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_path + target_label: __metrics_path__ + - action: replace + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + source_labels: + - __address__ + - __meta_kubernetes_service_annotation_prometheus_io_port + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_service_name + target_label: service + - action: replace + source_labels: + - __meta_kubernetes_pod_node_name + target_label: node + - honor_labels: true + job_name: kubernetes-service-endpoints-slow + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_scrape_slow + - action: replace + regex: (https?) + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_scheme + target_label: __scheme__ + - action: replace + regex: (.+) + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_path + target_label: __metrics_path__ + - action: replace + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + source_labels: + - __address__ + - __meta_kubernetes_service_annotation_prometheus_io_port + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_service_name + target_label: service + - action: replace + source_labels: + - __meta_kubernetes_pod_node_name + target_label: node + scrape_interval: 5m + scrape_timeout: 30s + - honor_labels: true + job_name: prometheus-pushgateway + kubernetes_sd_configs: + - role: service + relabel_configs: + - action: keep + regex: pushgateway + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_probe + - honor_labels: true + job_name: kubernetes-services + kubernetes_sd_configs: + - role: service + metrics_path: /probe + params: + module: + - http_2xx + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_probe + - source_labels: + - __address__ + target_label: __param_target + - replacement: blackbox + target_label: __address__ + - source_labels: + - __param_target + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - honor_labels: true + job_name: kubernetes-pods + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scrape + - action: drop + regex: true + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scrape_slow + - action: replace + regex: (https?) 
+ source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scheme + target_label: __scheme__ + - action: replace + regex: (.+) + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_path + target_label: __metrics_path__ + - action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_port + - __meta_kubernetes_pod_ip + target_label: __address__ + - action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_port + - __meta_kubernetes_pod_ip + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - action: drop + regex: Pending|Succeeded|Failed|Completed + source_labels: + - __meta_kubernetes_pod_phase + - action: replace + source_labels: + - __meta_kubernetes_pod_node_name + target_label: node + - honor_labels: true + job_name: kubernetes-pods-slow + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scrape_slow + - action: replace + regex: (https?) + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scheme + target_label: __scheme__ + - action: replace + regex: (.+) + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_path + target_label: __metrics_path__ + - action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_port + - __meta_kubernetes_pod_ip + target_label: __address__ + - action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_port + - __meta_kubernetes_pod_ip + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - action: drop + regex: Pending|Succeeded|Failed|Completed + source_labels: + - __meta_kubernetes_pod_phase + - action: replace + source_labels: + - __meta_kubernetes_pod_node_name + target_label: node + scrape_interval: 5m + scrape_timeout: 30s + recording_rules.yml: | + {} + rules: | + {} +--- +# Source: prometheus/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/version: v3.5.0 + helm.sh/chart: prometheus-27.37.0 + app.kubernetes.io/part-of: prometheus + name: prometheus +rules: + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - 
apiGroups: + - "discovery.k8s.io" + resources: + - endpointslices + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +--- +# Source: prometheus/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/version: v3.5.0 + helm.sh/chart: prometheus-27.37.0 + app.kubernetes.io/part-of: prometheus + name: prometheus +subjects: + - kind: ServiceAccount + name: prometheus + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +--- +# Source: prometheus/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/version: v3.5.0 + helm.sh/chart: prometheus-27.37.0 + app.kubernetes.io/part-of: prometheus + name: prometheus + namespace: istio-system +spec: + ports: + - name: http + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + sessionAffinity: None + type: "ClusterIP" +--- +# Source: prometheus/templates/deploy.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/version: v3.5.0 + helm.sh/chart: prometheus-27.37.0 + app.kubernetes.io/part-of: prometheus + name: prometheus + namespace: istio-system +spec: + strategy: + type: Recreate + rollingUpdate: null + selector: + matchLabels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + replicas: 1 + revisionHistoryLimit: 10 + template: + metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/version: v3.5.0 + helm.sh/chart: prometheus-27.37.0 + app.kubernetes.io/part-of: prometheus + + sidecar.istio.io/inject: "false" + spec: + enableServiceLinks: true + serviceAccountName: prometheus + containers: + - name: prometheus-server-configmap-reload + image: "ghcr.io/prometheus-operator/prometheus-config-reloader:v0.85.0" + imagePullPolicy: "IfNotPresent" + args: + - --watched-dir=/etc/config + - --listen-address=0.0.0.0:8080 + - --reload-url=http://127.0.0.1:9090/-/reload + ports: + - containerPort: 8080 + name: metrics + livenessProbe: + httpGet: + path: /healthz + port: metrics + scheme: HTTP + initialDelaySeconds: 2 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /healthz + port: metrics + scheme: HTTP + periodSeconds: 10 + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + + - name: prometheus-server + image: "docker.io/prom/prometheus:v3.5.0" + imagePullPolicy: "IfNotPresent" + args: + - --storage.tsdb.retention.time=15d + - --config.file=/etc/config/prometheus.yml + - --storage.tsdb.path=/data + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + - --web.enable-lifecycle + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 4 + failureThreshold: 3 + 
successThreshold: 1 + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: /data + subPath: "" + dnsPolicy: ClusterFirst + terminationGracePeriodSeconds: 300 + volumes: + - name: config-volume + configMap: + name: prometheus + - name: storage-volume + emptyDir: + {} \ No newline at end of file From 499d406ea39dd8d252c5759a8f7f0bf18777e6b0 Mon Sep 17 00:00:00 2001 From: Alberto Gutierrez Date: Mon, 1 Dec 2025 11:40:23 +0100 Subject: [PATCH 2/2] Add tasks to evals Signed-off-by: Alberto Gutierrez --- .../kiali-istio-create.yaml | 31 +++++ .../kiali-istio-delete.yaml | 70 +++++++++++ .../kiali-istio-list/kiali-istio-list.yaml | 48 ++++++++ .../kiali-istio-patch/kiali-istio-patch.yaml | 110 ++++++++++++++++++ .../kiali-obs-unhealthy-namespaces.yaml | 17 +++ .../kiali-resource-get-namespaces.yaml | 17 +++ .../kiali-resource-get-service-detail.yaml | 17 +++ .../kiali-resource-list-workloads.yaml | 19 +++ .../kiali-resource-mesh-status.yaml | 60 ++++++++++ .../kiali-show-topology.yaml | 17 +++ .../kiali-status-kiali-istio.yaml | 17 +++ .../kiali-troubleshooting-latency-traces.yaml | 17 +++ .../kiali-troubleshooting-log.yaml | 45 +++++++ .../kiali-troubleshooting-trace-lagging.yaml | 69 +++++++++++ 14 files changed, 554 insertions(+) create mode 100644 evals/tasks/kiali-istio-create/kiali-istio-create.yaml create mode 100644 evals/tasks/kiali-istio-delete/kiali-istio-delete.yaml create mode 100644 evals/tasks/kiali-istio-list/kiali-istio-list.yaml create mode 100644 evals/tasks/kiali-istio-patch/kiali-istio-patch.yaml create mode 100644 evals/tasks/kiali-obs-unhealthy-namespaces/kiali-obs-unhealthy-namespaces.yaml create mode 100644 evals/tasks/kiali-resource-get-namespaces/kiali-resource-get-namespaces.yaml create mode 100644 evals/tasks/kiali-resource-get-service-detail/kiali-resource-get-service-detail.yaml create mode 100644 evals/tasks/kiali-resource-list-workloads/kiali-resource-list-workloads.yaml create mode 100644 evals/tasks/kiali-resource-mesh-status/kiali-resource-mesh-status.yaml create mode 100644 evals/tasks/kiali-show-topology/kiali-show-topology.yaml create mode 100644 evals/tasks/kiali-status-kiali-istio/kiali-status-kiali-istio.yaml create mode 100644 evals/tasks/kiali-troubleshooting-latency-traces/kiali-troubleshooting-latency-traces.yaml create mode 100644 evals/tasks/kiali-troubleshooting-log/kiali-troubleshooting-log.yaml create mode 100644 evals/tasks/kiali-troubleshooting-trace-lagging/kiali-troubleshooting-trace-lagging.yaml diff --git a/evals/tasks/kiali-istio-create/kiali-istio-create.yaml b/evals/tasks/kiali-istio-create/kiali-istio-create.yaml new file mode 100644 index 00000000..5b7212cf --- /dev/null +++ b/evals/tasks/kiali-istio-create/kiali-istio-create.yaml @@ -0,0 +1,31 @@ +kind: Task +metadata: + name: "Create a gateway" + category: "Istio Configuration & Management" + difficulty: medium +steps: + setup: + inline: |- + #!/usr/bin/env bash + set -euo pipefail + verify: + inline: |- + #!/usr/bin/env bash + set -euo pipefail + NS="istio-system" + NAME="my-gateway" + if kubectl get gw "$NAME" -n "$NS" >/dev/null 2>&1; then + echo "Verified: Gateway '$NAME' exists in namespace '$NS'." + else + echo "Gateway '$NAME' not found in namespace '$NS'." 
+        exit 1
+      fi
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="istio-system"
+      NAME="my-gateway"
+      kubectl delete gw "$NAME" -n "$NS" --ignore-not-found
+  prompt:
+    inline: Create a Gateway named my-gateway in the istio-system namespace.
\ No newline at end of file
diff --git a/evals/tasks/kiali-istio-delete/kiali-istio-delete.yaml b/evals/tasks/kiali-istio-delete/kiali-istio-delete.yaml
new file mode 100644
index 00000000..1f9f2e2e
--- /dev/null
+++ b/evals/tasks/kiali-istio-delete/kiali-istio-delete.yaml
@@ -0,0 +1,70 @@
+kind: Task
+metadata:
+  name: "Remove fault injection"
+  category: "Istio Configuration & Management"
+  difficulty: medium
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      cat <<'EOF' | kubectl apply -f -
+      apiVersion: networking.istio.io/v1
+      kind: DestinationRule
+      metadata:
+        namespace: bookinfo
+        name: ratings
+        labels:
+          gevals.kiali.io/test: delete-fault-injection
+      spec:
+        host: ratings.bookinfo.svc.cluster.local
+        subsets:
+          - name: v1
+            labels:
+              version: v1
+      ---
+      apiVersion: networking.istio.io/v1
+      kind: VirtualService
+      metadata:
+        namespace: bookinfo
+        name: ratings
+        labels:
+          gevals.kiali.io/test: delete-fault-injection
+      spec:
+        hosts:
+          - ratings.bookinfo.svc.cluster.local
+        http:
+          - route:
+              - destination:
+                  host: ratings.bookinfo.svc.cluster.local
+                  subset: v1
+                weight: 100
+            fault:
+              abort:
+                percentage:
+                  value: 100
+                httpStatus: 503
+      EOF
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+      vs_fault_names="$(kubectl get virtualservice -n "${NAMESPACE:-bookinfo}" -o json \
+        | jq -r '[.items[] | select(any(.spec.http[]?; has("fault"))) | .metadata.name] | .[]?')"
+      if [[ -n "${vs_fault_names}" ]]; then
+        exit 1
+      fi
+
+      # Verify DestinationRule 'ratings' does not exist (created during setup)
+      if kubectl get destinationrule ratings -n "${NAMESPACE:-bookinfo}" >/dev/null 2>&1; then
+        exit 1
+      fi
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="bookinfo"
+      LABEL="gevals.kiali.io/test=delete-fault-injection"
+      kubectl delete virtualservice -n "$NS" -l "$LABEL" --ignore-not-found
+      kubectl delete destinationrule -n "$NS" -l "$LABEL" --ignore-not-found
+  prompt:
+    inline: Fix my namespace bookinfo to remove the fault injection.
\ No newline at end of file
diff --git a/evals/tasks/kiali-istio-list/kiali-istio-list.yaml b/evals/tasks/kiali-istio-list/kiali-istio-list.yaml
new file mode 100644
index 00000000..b9da60e4
--- /dev/null
+++ b/evals/tasks/kiali-istio-list/kiali-istio-list.yaml
@@ -0,0 +1,48 @@
+kind: Task
+metadata:
+  name: "List all VS in bookinfo namespace"
+  category: "Istio Configuration & Management"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      cat <<'EOF' | kubectl apply -f -
+      apiVersion: networking.istio.io/v1alpha3
+      kind: VirtualService
+      metadata:
+        name: productpage-forced-failure
+        namespace: bookinfo
+        labels:
+          gevals.kiali.io/test: delete-fault-injection
+      spec:
+        hosts:
+          - "productpage"
+          - "productpage-bookinfo.apps-crc.testing" # <--- YOUR EXACT URL
+        gateways:
+          - bookinfo-gateway
+          - mesh # So it also applies when one microservice calls another internally
+        http:
+          - fault:
+              abort:
+                httpStatus: 500
+                percentage:
+                  value: 100
+            route:
+              - destination:
+                  host: productpage
+                  subset: v1
+      EOF
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="bookinfo"
+      LABEL="gevals.kiali.io/test=delete-fault-injection"
+      kubectl delete virtualservice -n "$NS" -l "$LABEL" --ignore-not-found
+  prompt:
+    inline: List all VirtualServices in the bookinfo namespace and check if they have any validation errors
\ No newline at end of file
diff --git a/evals/tasks/kiali-istio-patch/kiali-istio-patch.yaml b/evals/tasks/kiali-istio-patch/kiali-istio-patch.yaml
new file mode 100644
index 00000000..07c31b05
--- /dev/null
+++ b/evals/tasks/kiali-istio-patch/kiali-istio-patch.yaml
@@ -0,0 +1,110 @@
+kind: Task
+metadata:
+  name: "Patch my traffic"
+  category: "Istio Configuration & Management"
+  difficulty: medium
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      cat <<'EOF' | kubectl apply -f -
+      kind: DestinationRule
+      apiVersion: networking.istio.io/v1
+      metadata:
+        namespace: bookinfo
+        name: reviews
+        labels:
+          gevals.kiali.io/test: gevals-testing
+        annotations: ~
+      spec:
+        host: reviews.bookinfo.svc.cluster.local
+        subsets:
+          - name: v1
+            labels:
+              version: v1
+          - name: v2
+            labels:
+              version: v2
+          - name: v3
+            labels:
+              version: v3
+        trafficPolicy: ~
+
+      ---
+
+      kind: VirtualService
+      apiVersion: networking.istio.io/v1
+      metadata:
+        namespace: bookinfo
+        name: reviews
+        labels:
+          gevals.kiali.io/test: gevals-testing
+      spec:
+        http:
+          - route:
+              - destination:
+                  host: reviews.bookinfo.svc.cluster.local
+                  subset: v1
+                weight: 0
+              - destination:
+                  host: reviews.bookinfo.svc.cluster.local
+                  subset: v2
+                weight: 0
+              - destination:
+                  host: reviews.bookinfo.svc.cluster.local
+                  subset: v3
+                weight: 100
+        hosts:
+          - reviews.bookinfo.svc.cluster.local
+        gateways: ~
+      EOF
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="bookinfo"
+      LABEL="gevals.kiali.io/test=gevals-testing"
+      NAME="reviews"
+
+      if ! command -v jq >/dev/null 2>&1; then
+        echo "jq is required for verification"
+        exit 1
+      fi
+
+      # Fetch the VirtualService by label and name
+      vs_json="$(kubectl get virtualservice -n "$NS" -l "$LABEL" -o json)"
+      found="$(echo "$vs_json" | jq -r --arg name "$NAME" '.items[]? | select(.metadata.name==$name) | .metadata.name' | head -n1)"
+      if [[ "$found" != "$NAME" ]]; then
+        echo "VirtualService '$NAME' with label '$LABEL' not found in namespace '$NS'"
+        exit 1
+      fi
+
+      # Verify there is a route to subset v2 with weight 50
+      ok="$(echo "$vs_json" | jq -e --arg name "$NAME" '
+        .items[]? | select(.metadata.name==$name)
+        | any(.spec.http[]?.route[]?; (.destination.subset=="v2") and ((.weight // 0) == 50))
+      ' >/dev/null && echo yes || echo no)"
+
+      if [[ "$ok" != "yes" ]]; then
+        echo "VirtualService '$NAME' does not route subset v2 with weight 50"
+        echo "Current routes:"
+        echo "$vs_json" | jq -r --arg name "$NAME" '
+          .items[]? | select(.metadata.name==$name)
+          | .spec.http[]?.route[]? | {subset: .destination.subset, weight: .weight}
+        '
+        exit 1
+      fi
+      echo "Verified: VirtualService '$NAME' routes subset v2 with weight 50."
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="bookinfo"
+      LABEL="gevals.kiali.io/test=gevals-testing"
+      kubectl delete virtualservice -n "$NS" -l "$LABEL" --ignore-not-found
+      kubectl delete destinationrule -n "$NS" -l "$LABEL" --ignore-not-found
+  prompt:
+    inline: I need to shift 50% of traffic to v2 of the reviews service. Apply a patch to the existing VirtualService.
+
+
\ No newline at end of file
diff --git a/evals/tasks/kiali-obs-unhealthy-namespaces/kiali-obs-unhealthy-namespaces.yaml b/evals/tasks/kiali-obs-unhealthy-namespaces/kiali-obs-unhealthy-namespaces.yaml
new file mode 100644
index 00000000..e0518457
--- /dev/null
+++ b/evals/tasks/kiali-obs-unhealthy-namespaces/kiali-obs-unhealthy-namespaces.yaml
@@ -0,0 +1,17 @@
+kind: Task
+metadata:
+  name: "Unhealthy Namespaces"
+  category: "High-Level Observability & Health"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+  prompt:
+    inline: Are there any unhealthy namespaces in my mesh right now?
\ No newline at end of file
diff --git a/evals/tasks/kiali-resource-get-namespaces/kiali-resource-get-namespaces.yaml b/evals/tasks/kiali-resource-get-namespaces/kiali-resource-get-namespaces.yaml
new file mode 100644
index 00000000..59a1a1a5
--- /dev/null
+++ b/evals/tasks/kiali-resource-get-namespaces/kiali-resource-get-namespaces.yaml
@@ -0,0 +1,17 @@
+kind: Task
+metadata:
+  name: "Get mesh namespaces"
+  category: "Resource Inspection"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+  prompt:
+    inline: Check namespaces in my mesh.
\ No newline at end of file
diff --git a/evals/tasks/kiali-resource-get-service-detail/kiali-resource-get-service-detail.yaml b/evals/tasks/kiali-resource-get-service-detail/kiali-resource-get-service-detail.yaml
new file mode 100644
index 00000000..7db98016
--- /dev/null
+++ b/evals/tasks/kiali-resource-get-service-detail/kiali-resource-get-service-detail.yaml
@@ -0,0 +1,17 @@
+kind: Task
+metadata:
+  name: "Get service detail"
+  category: "Resource Inspection"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+  prompt:
+    inline: Get the full details and health status for the details service
\ No newline at end of file
diff --git a/evals/tasks/kiali-resource-list-workloads/kiali-resource-list-workloads.yaml b/evals/tasks/kiali-resource-list-workloads/kiali-resource-list-workloads.yaml
new file mode 100644
index 00000000..4f426436
--- /dev/null
+++ b/evals/tasks/kiali-resource-list-workloads/kiali-resource-list-workloads.yaml
@@ -0,0 +1,19 @@
+kind: Task
+metadata:
+  name: "List workloads without sidecar"
+  category: "Resource Inspection"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+      kubectl patch deployment details-v1 -n bookinfo -p '{"spec": {"template": {"metadata": {"annotations": {"sidecar.istio.io/inject": "false"}}}}}'
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      kubectl patch deployment details-v1 -n bookinfo -p '{"spec": {"template": {"metadata": {"annotations": {"sidecar.istio.io/inject": "true"}}}}}'
+  prompt:
+    inline: List all workloads in the bookinfo namespace that have missing sidecars.
\ No newline at end of file
diff --git a/evals/tasks/kiali-resource-mesh-status/kiali-resource-mesh-status.yaml b/evals/tasks/kiali-resource-mesh-status/kiali-resource-mesh-status.yaml
new file mode 100644
index 00000000..c4eb284a
--- /dev/null
+++ b/evals/tasks/kiali-resource-mesh-status/kiali-resource-mesh-status.yaml
@@ -0,0 +1,60 @@
+kind: Task
+metadata:
+  name: "Status of my mesh"
+  category: "Resource Inspection"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      cat <<'EOF' | kubectl apply -f -
+      apiVersion: networking.istio.io/v1
+      kind: DestinationRule
+      metadata:
+        namespace: bookinfo
+        name: ratings
+        labels:
+          gevals.kiali.io/test: delete-fault-injection
+      spec:
+        host: ratings.bookinfo.svc.cluster.local
+        subsets:
+          - name: v1
+            labels:
+              version: v1
+      ---
+      apiVersion: networking.istio.io/v1
+      kind: VirtualService
+      metadata:
+        namespace: bookinfo
+        name: ratings
+        labels:
+          gevals.kiali.io/test: delete-fault-injection
+      spec:
+        hosts:
+          - ratings.bookinfo.svc.cluster.local
+        http:
+          - route:
+              - destination:
+                  host: ratings.bookinfo.svc.cluster.local
+                  subset: v1
+                weight: 100
+            fault:
+              abort:
+                percentage:
+                  value: 100
+                httpStatus: 503
+      EOF
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="bookinfo"
+      LABEL="gevals.kiali.io/test=delete-fault-injection"
+      kubectl delete virtualservice -n "$NS" -l "$LABEL" --ignore-not-found
+      kubectl delete destinationrule -n "$NS" -l "$LABEL" --ignore-not-found
+  prompt:
+    inline: Check my mesh.
\ No newline at end of file
diff --git a/evals/tasks/kiali-show-topology/kiali-show-topology.yaml b/evals/tasks/kiali-show-topology/kiali-show-topology.yaml
new file mode 100644
index 00000000..48d02579
--- /dev/null
+++ b/evals/tasks/kiali-show-topology/kiali-show-topology.yaml
@@ -0,0 +1,17 @@
+kind: Task
+metadata:
+  name: "Show topology bookinfo"
+  category: "High-Level Observability & Health"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+  prompt:
+    inline: Show me the topology of the bookinfo namespace.
\ No newline at end of file
diff --git a/evals/tasks/kiali-status-kiali-istio/kiali-status-kiali-istio.yaml b/evals/tasks/kiali-status-kiali-istio/kiali-status-kiali-istio.yaml
new file mode 100644
index 00000000..5f3993fb
--- /dev/null
+++ b/evals/tasks/kiali-status-kiali-istio/kiali-status-kiali-istio.yaml
@@ -0,0 +1,17 @@
+kind: Task
+metadata:
+  name: "Status Kiali and Istio"
+  category: "High-Level Observability & Health"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+  prompt:
+    inline: Give me a status report on the interaction between Kiali and Istio components
\ No newline at end of file
diff --git a/evals/tasks/kiali-troubleshooting-latency-traces/kiali-troubleshooting-latency-traces.yaml b/evals/tasks/kiali-troubleshooting-latency-traces/kiali-troubleshooting-latency-traces.yaml
new file mode 100644
index 00000000..91eaefb0
--- /dev/null
+++ b/evals/tasks/kiali-troubleshooting-latency-traces/kiali-troubleshooting-latency-traces.yaml
@@ -0,0 +1,17 @@
+kind: Task
+metadata:
+  name: "Get latency workload"
+  category: "Troubleshooting & Debugging"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+  prompt:
+    inline: Analyze the latency for the reviews workload over the last 30 minutes?
\ No newline at end of file
diff --git a/evals/tasks/kiali-troubleshooting-log/kiali-troubleshooting-log.yaml b/evals/tasks/kiali-troubleshooting-log/kiali-troubleshooting-log.yaml
new file mode 100644
index 00000000..4fc6c423
--- /dev/null
+++ b/evals/tasks/kiali-troubleshooting-log/kiali-troubleshooting-log.yaml
@@ -0,0 +1,45 @@
+kind: Task
+metadata:
+  name: "Get productpage log due to 500 errors"
+  category: "Troubleshooting & Debugging"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      cat <<'EOF' | kubectl apply -f -
+      apiVersion: networking.istio.io/v1alpha3
+      kind: VirtualService
+      metadata:
+        name: productpage-fault
+        namespace: bookinfo
+        labels:
+          gevals.kiali.io/test: gevals-testing
+      spec:
+        hosts:
+          - "productpage"
+          - "productpage.bookinfo.svc.cluster.local"
+          - "productpage-bookinfo.apps-crc.testing" # <--- THIS IS THE KEY!
+        http:
+          - fault:
+              abort:
+                httpStatus: 500
+                percentage:
+                  value: 100
+            route:
+              - destination:
+                  host: productpage
+      EOF
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="bookinfo"
+      LABEL="gevals.kiali.io/test=gevals-testing"
+      kubectl delete virtualservice -n "$NS" -l "$LABEL" --ignore-not-found
+  prompt:
+    inline: Why is the productpage service returning 500 errors?
\ No newline at end of file
diff --git a/evals/tasks/kiali-troubleshooting-trace-lagging/kiali-troubleshooting-trace-lagging.yaml b/evals/tasks/kiali-troubleshooting-trace-lagging/kiali-troubleshooting-trace-lagging.yaml
new file mode 100644
index 00000000..969c4e86
--- /dev/null
+++ b/evals/tasks/kiali-troubleshooting-trace-lagging/kiali-troubleshooting-trace-lagging.yaml
@@ -0,0 +1,69 @@
+kind: Task
+metadata:
+  name: "Check traces for a service"
+  category: "Troubleshooting & Debugging"
+  difficulty: easy
+steps:
+  setup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      cat <<'EOF' | kubectl apply -f -
+      kind: DestinationRule
+      apiVersion: networking.istio.io/v1
+      metadata:
+        namespace: bookinfo
+        name: ratings
+        labels:
+          gevals.kiali.io/test: delete-fault-injection
+        annotations: ~
+      spec:
+        host: ratings.bookinfo.svc.cluster.local
+        subsets:
+          - name: v1
+            labels:
+              version: v1
+        trafficPolicy: ~
+
+      ---
+
+      kind: VirtualService
+      apiVersion: networking.istio.io/v1
+      metadata:
+        namespace: bookinfo
+        name: ratings
+        labels:
+          gevals.kiali.io/test: delete-fault-injection
+      spec:
+        http:
+          - route:
+              - destination:
+                  host: ratings.bookinfo.svc.cluster.local
+                  subset: v1
+                weight: 100
+            fault:
+              delay:
+                percentage:
+                  value: 100
+                fixedDelay: 5s
+        hosts:
+          - ratings.bookinfo.svc.cluster.local
+        gateways: ~
+      EOF
+  verify:
+    inline: |-
+      #!/usr/bin/env bash
+  cleanup:
+    inline: |-
+      #!/usr/bin/env bash
+      set -euo pipefail
+      NS="bookinfo"
+      LABEL="gevals.kiali.io/test=delete-fault-injection"
+      kubectl delete virtualservice -n "$NS" -l "$LABEL" --ignore-not-found
+      kubectl delete destinationrule -n "$NS" -l "$LABEL" --ignore-not-found
+  prompt:
+    inline: I see a spike in duration for ratings. Can you check the traces to see which span is lagging?
+
+
+
+
\ No newline at end of file